Compare commits

..

7 Commits

Author SHA1 Message Date
girish@cloudron.io
ed0ecac7a1 0.9.2 changes 2016-02-25 20:53:36 -08:00
Johannes Zellner
12ad14aad2 add 0.9.1 changes 2016-02-25 20:52:05 -08:00
girish@cloudron.io
9a7520cb9c allow 1.2 times RAM
This is basically to allow two Phabricators and another small app with
no warning on a 4GB droplet :-)
2016-02-25 20:44:25 -08:00
girish@cloudron.io
3b75f04a6c round memory to nearest GB
os.totalmem returns some close-to-GB number. Because of this, two
apps with 2GB each don't install on a 4GB server (a sketch follows the commit list).
2016-02-25 18:10:13 -08:00
girish@cloudron.io
587dddb752 Edit -> Save 2016-02-25 18:09:07 -08:00
Girish Ramakrishnan
d7b012076b do not set memoryLimit 2016-02-25 18:08:47 -08:00
Johannes Zellner
fab5e36bfe Warn the user about installing too many apps, but give an override button 2016-02-25 18:07:54 -08:00
719 changed files with 52764 additions and 67281 deletions
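Commits 9a7520cb9c and 3b75f04a6c above together describe the install-time memory check. A minimal sketch of that combined logic, assuming hypothetical names (canInstallApp, installedApps, app.memoryLimit) rather than the actual box APIs:

```js
var os = require('os');

var GIGABYTE = 1024 * 1024 * 1024;

// os.totalmem() reports a close-to-GB number (slightly under 4GB on a
// "4gb" droplet), so round to the nearest GB before comparing
function totalMemoryRoundedToGb() {
    return Math.round(os.totalmem() / GIGABYTE) * GIGABYTE;
}

// allow overcommitting up to 1.2 times the RAM, so that e.g. two 2GB
// Phabricators plus a small app fit on a 4GB droplet with no warning
function canInstallApp(installedApps, newAppMemory) {
    var used = installedApps.reduce(function (total, app) { return total + app.memoryLimit; }, 0);
    return used + newAppMemory <= 1.2 * totalMemoryRoundedToGb();
}
```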

View File

@@ -1,29 +0,0 @@
{
"env": {
"node": true,
"es6": true
},
"extends": "eslint:recommended",
"parserOptions": {
"ecmaVersion": 2020
},
"rules": {
"indent": [
"error",
4
],
"linebreak-style": [
"error",
"unix"
],
"quotes": [
"error",
"single"
],
"semi": [
"error",
"always"
],
"no-console": "off"
}
}

4
.gitattributes vendored
View File

@@ -1,7 +1,7 @@
# following files are skipped when exporting using git archive
/release export-ignore
/admin export-ignore
test export-ignore
.jshintrc export-ignore
.gitlab export-ignore
.gitattributes export-ignore
.gitignore export-ignore

4
.gitignore vendored
View File

@@ -1,9 +1,11 @@
node_modules/
coverage/
.nyc_output/
docs/
webadmin/dist/
setup/splash/website/
installer/src/certs/server.key
# vim swap files
*.swp

View File

@@ -1,6 +0,0 @@
Please do not use this issue tracker for support requests and bug reports.
This issue tracker is used by the Cloudron development team to track actual
bugs in the code.
Please use the forum at https://forum.cloudron.io to report bugs. For
confidential issues, please email us at support@cloudron.io.

View File

@@ -1,7 +0,0 @@
Please do not use this issue tracker for support requests and feature requests.
This issue tracker is used by the Cloudron development team to track issues in
the code.
Please use the forum at https://forum.cloudron.io to report bugs. For
confidential issues, please email us at support@cloudron.io.

View File

@@ -1,5 +1,7 @@
{
-  "node": true,
-  "unused": true,
-  "esversion": 11
+  "node": true,
+  "browser": true,
+  "unused": true,
+  "globalstrict": true,
+  "predef": [ "angular", "$" ]
}

2078
CHANGES

File diff suppressed because it is too large.

35
LICENSE
View File

@@ -1,35 +0,0 @@
The Cloudron Subscription license
Copyright (c) 2022 Cloudron UG
With regard to the Cloudron Software:
This software and associated documentation files (the "Software") may only be
used in production, if you (and any entity that you represent) have agreed to,
and are in compliance with, the Cloudron Subscription Terms of Service, available
at https://cloudron.io/legal/terms.html (the “Subscription Terms”), or other
agreement governing the use of the Software, as agreed by you and Cloudron,
and otherwise have a valid Cloudron Subscription. Subject to the foregoing sentence,
you are free to modify this Software and publish patches to the Software. You agree
that Subscription and/or its licensors (as applicable) retain all right, title and
interest in and to all such modifications and/or patches, and all such modifications
and/or patches may only be used, copied, modified, displayed, distributed, or otherwise
exploited with a valid Cloudron subscription. Notwithstanding the foregoing, you may copy
and modify the Software for development and testing purposes, without requiring a
subscription. You agree that Cloudron and/or its licensors (as applicable) retain
all right, title and interest in and to all such modifications. You are not
granted any other rights beyond what is expressly stated herein. Subject to the
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
and/or sell the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
For all third party components incorporated into the Cloudron Software, those
components are licensed under the original license provided by the owner of the
applicable component.

View File

@@ -1,84 +1,17 @@
![Translation status](https://translate.cloudron.io/widgets/cloudron/-/svg-badge.svg)
Cloudron a Smart Server
=======================
# Cloudron
[Cloudron](https://cloudron.io) is the best way to run apps on your server.
Web applications like email, contacts, blog, chat are the backbone of the modern
internet. Yet, we live in a world where hosting these essential applications is
a complex task.
Selfhost Instructions
---------------------
We are building the ultimate platform for self-hosting web apps. The Cloudron allows
anyone to effortlessly host web applications on their server on their own terms.
The smart server currently relies on an AWS account with access to Route53 and S3 and is tested on DigitalOcean and EC2.
## Features
* Single click install for apps. Check out the [App Store](https://cloudron.io/appstore.html).
* Per-app encrypted backups and restores.
* App updates delivered via the App Store.
* Secure - Cloudron manages the firewall. All apps are secured with HTTPS. Certificates are
installed and renewed automatically.
* Centralized User & Group management. Control who can access which app.
* Single Sign On. Use same credentials across all apps.
* Automatic updates for the Cloudron platform.
* Trivially migrate to another server keeping your apps and data (for example, switch your
infrastructure provider or move to a bigger server).
* Comprehensive [REST API](https://docs.cloudron.io/api/).
* [CLI](https://docs.cloudron.io/custom-apps/cli/) to configure apps.
* Alerts, audit logs, graphs, dns management ... and much more
## Demo
Try our demo at https://my.demo.cloudron.io (username: cloudron, password: cloudron).
## Installing
[Install script](https://docs.cloudron.io/installation/) - [Pricing](https://cloudron.io/pricing.html)
**Note:** This repo is only a small part of what gets installed on your server - there are also
the dashboard, database addons, graph container, base image, etc. Cloudron also relies
on external services such as the App Store for apps to be installed. As such, don't
clone this repo, npm install, and expect something to work.
## Development
This is the backend code of Cloudron. The frontend code is [here](https://git.cloudron.io/cloudron/dashboard).
The way to develop is to first install a full instance of Cloudron in a VM. Then you can use the [hotfix](https://git.cloudron.io/cloudron/cloudron-machine)
tool to patch the VM with the latest code.
First create a virtual private server with Ubuntu 15.04 and run the following commands in an ssh session to initialize the base image:
```
SSH_PASSPHRASE=sshkeypassword cloudron-machine hotfix --cloudron my.example.com --release 6.0.0 --ssh-key keyname
curl https://s3.amazonaws.com/prod-cloudron-releases/installer.sh -o installer.sh
chmod +x installer.sh
./installer.sh <domain> <aws access key> <aws access secret> <backup bucket> <provider> <release sha1>
```
## License
Please note that the Cloudron code is under a source-available license. This is not the same as an
open source license but ensures the code is available for introspection (and hacking!).
## Contributions
Just to give a heads up, we are a bit restrictive about merging changes. We are a small team and
would like to keep our maintenance burden low. It is best to discuss features first in the [forum](https://forum.cloudron.io),
also to figure out how many other people would use a feature and whether that justifies maintaining it.
# Localization
![Translation status](https://translate.cloudron.io/widgets/cloudron/-/287x66-white.png)
## Support
* [Documentation](https://docs.cloudron.io/)
* [Forum](https://forum.cloudron.io/)

View File

@@ -1 +0,0 @@
# release version. do not edit manually

Binary file not shown.

Image size: 14 KiB before, 5.5 KiB after.

186
baseimage/createImage Executable file
View File

@@ -0,0 +1,186 @@
#!/bin/bash
set -eu -o pipefail
assertNotEmpty() {
: "${!1:? "$1 is not set."}"
}
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
export JSON="${SOURCE_DIR}/node_modules/.bin/json"
provider="digitalocean"
installer_revision=$(git rev-parse HEAD)
box_name=""
server_id=""
server_ip=""
destroy_server="yes"
deploy_env="dev"
# Only GNU getopt supports long options. OS X comes bundled with the BSD getopt
# brew install gnu-getopt to get the GNU getopt on OS X
[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt"
readonly GNU_GETOPT
args=$(${GNU_GETOPT} -o "" -l "provider:,revision:,regions:,size:,name:,no-destroy,env:" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
case "$1" in
--env) deploy_env="$2"; shift 2;;
--revision) installer_revision="$2"; shift 2;;
--provider) provider="$2"; shift 2;;
--name) box_name="$2"; destroy_server="no"; shift 2;;
--no-destroy) destroy_server="no"; shift 2;;
--) break;;
*) echo "Unknown option $1"; exit 1;;
esac
done
echo "Creating image using ${provider}"
if [[ "${provider}" == "digitalocean" ]]; then
if [[ "${deploy_env}" == "staging" ]]; then
assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}"
elif [[ "${deploy_env}" == "dev" ]]; then
assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}"
elif [[ "${deploy_env}" == "prod" ]]; then
assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}"
else
echo "No such env ${deploy_env}."
exit 1
fi
vps="/bin/bash ${SCRIPT_DIR}/digitalocean.sh"
else
echo "Unknown provider : ${provider}"
exit 1
fi
readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}"
readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
readonly ssh202="ssh -p 202 -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
readonly ssh22="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
if [[ ! -f "${ssh_keys}" ]]; then
echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
exit 1
fi
function get_pretty_revision() {
local git_rev="$1"
local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)
echo "${sha1}"
}
now=$(date "+%Y-%m-%d-%H%M%S")
pretty_revision=$(get_pretty_revision "${installer_revision}")
if [[ -z "${box_name}" ]]; then
# if you change this, change the regexp in appstore/janitor.js
box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes
# create a new server if no name given
if ! caas_ssh_key_id=$($vps get_ssh_key_id "caas"); then
echo "Could not query caas ssh key"
exit 1
fi
echo "Detected caas ssh key id: ${caas_ssh_key_id}"
echo "Creating Server with name [${box_name}]"
if ! server_id=$($vps create ${caas_ssh_key_id} ${box_name}); then
echo "Failed to create server"
exit 1
fi
echo "Created server with id: ${server_id}"
# If we run scripts overenthusiastically without the wait, the setup script randomly fails
echo -n "Waiting 120 seconds for server creation"
for i in $(seq 1 24); do
echo -n "."
sleep 5
done
echo ""
else
if ! server_id=$($vps get_id "${box_name}"); then
echo "Could not determine id from name"
exit 1
fi
echo "Reusing server with id: ${server_id}"
$vps power_on "${server_id}"
fi
# Query until we get an IP
while true; do
echo "Trying to get the server IP"
if server_ip=$($vps get_ip "${server_id}"); then
echo "Server IP : [${server_ip}]"
break
fi
echo "Timedout, trying again in 10 seconds"
sleep 10
done
while true; do
echo "Trying to copy init script to server"
if $scp22 "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${server_ip}:.; then
break
fi
echo "Timedout, trying again in 30 seconds"
sleep 30
done
echo "Copying INFRA_VERSION"
$scp22 "${SCRIPT_DIR}/../setup/INFRA_VERSION" root@${server_ip}:.
echo "Copying box source"
cd "${SOURCE_DIR}"
git archive --format=tar HEAD | $ssh22 "root@${server_ip}" "cat - > /tmp/box.tar.gz"
echo "Executing init script"
if ! $ssh22 "root@${server_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then
echo "Init script failed"
exit 1
fi
echo "Shutting down server with id : ${server_id}"
$ssh202 "root@${server_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail
# wait 30 secs for actual shutdown
echo "Waiting 30 seconds for server to shut down"
sleep 30
echo "Powering off server"
if ! $vps power_off "${server_id}"; then
echo "Could not power off server"
exit 1
fi
snapshot_name="box-${deploy_env}-${pretty_revision}-${now}"
echo "Snapshotting as ${snapshot_name}"
if ! image_id=$($vps snapshot "${server_id}" "${snapshot_name}"); then
echo "Could not snapshot and get image id"
exit 1
fi
if [[ "${destroy_server}" == "yes" ]]; then
echo "Destroying server"
if ! $vps destroy "${server_id}"; then
echo "Could not destroy server"
exit 1
fi
else
echo "Skipping server destroy"
fi
echo "Transferring image ${image_id} to other regions"
$vps transfer_image_to_all_regions "${image_id}"
echo "Done."

240
baseimage/digitalocean.sh Normal file
View File

@@ -0,0 +1,240 @@
#!/bin/bash
if [[ -z "${DIGITAL_OCEAN_TOKEN}" ]]; then
echo "Script requires DIGITAL_OCEAN_TOKEN env to be set"
exit 1
fi
if [[ -z "${JSON}" ]]; then
echo "Script requires JSON env to be set to path of JSON binary"
exit 1
fi
readonly CURL="curl -s -u ${DIGITAL_OCEAN_TOKEN}:"
function debug() {
echo "$@" >&2
}
function get_ssh_key_id() {
id=$($CURL "https://api.digitalocean.com/v2/account/keys" \
| $JSON ssh_keys \
| $JSON -c "this.name === \"$1\"" \
| $JSON 0.id)
[[ -z "$id" ]] && exit 1
echo "$id"
}
function create_droplet() {
local ssh_key_id="$1"
local box_name="$2"
local image_region="sfo1"
local ubuntu_image_slug="ubuntu-15-10-x64"
local box_size="512mb"
local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"
id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id)
[[ -z "$id" ]] && exit 1
echo "$id"
}
function get_droplet_ip() {
local droplet_id="$1"
ip=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address")
[[ -z "$ip" ]] && exit 1
echo "$ip"
}
function get_droplet_id() {
local droplet_name="$1"
id=$($CURL "https://api.digitalocean.com/v2/droplets?per_page=100" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id")
[[ -z "$id" ]] && exit 1
echo "$id"
}
function power_off_droplet() {
local droplet_id="$1"
local data='{"type":"power_off"}'
local response=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions")
local event_id=`echo "${response}" | $JSON action.id`
if [[ -z "${event_id}" ]]; then
debug "Got no event id, assuming already powered off."
debug "Response: ${response}"
return
fi
debug "Powered off droplet. Event id: ${event_id}"
debug -n "Waiting for droplet to power off"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
}
function power_on_droplet() {
local droplet_id="$1"
local data='{"type":"power_on"}'
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`
debug "Powered on droplet. Event id: ${event_id}"
if [[ -z "${event_id}" ]]; then
debug "Got no event id, assuming already powered on"
return
fi
debug -n "Waiting for droplet to power on"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
}
function get_image_id() {
local snapshot_name="$1"
local image_id=""
image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \
| $JSON images \
| $JSON -c "this.name === \"${snapshot_name}\"" 0.id)
if [[ -n "${image_id}" ]]; then
echo "${image_id}"
fi
}
function snapshot_droplet() {
local droplet_id="$1"
local snapshot_name="$2"
local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}"
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`
debug "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}"
debug -n "Waiting for snapshot to complete"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
get_image_id "${snapshot_name}"
}
function destroy_droplet() {
local droplet_id="$1"
# TODO: check for 204 status
$CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}"
debug "Droplet destroyed"
debug ""
}
function transfer_image() {
local image_id="$1"
local region_slug="$2"
local data="{\"type\":\"transfer\",\"region\":\"${region_slug}\"}"
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/images/${image_id}/actions" | $JSON action.id`
echo "${event_id}"
}
function wait_for_image_event() {
local image_id="$1"
local event_id="$2"
debug -n "Waiting for ${event_id}"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
}
function transfer_image_to_all_regions() {
local image_id="$1"
xfer_events=()
image_regions=(ams3) ## sfo1 is where the image is created
for image_region in ${image_regions[@]}; do
xfer_event=$(transfer_image ${image_id} ${image_region})
echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}"
xfer_events+=("${xfer_event}")
sleep 1
done
echo "Image transfer initiated, but they will take some time to get transferred."
for xfer_event in ${xfer_events[@]}; do
wait_for_image_event "${image_id}" "${xfer_event}"
done
}
if [[ $# -lt 1 ]]; then
debug "<command> <params...>"
exit 1
fi
case $1 in
get_ssh_key_id)
get_ssh_key_id "${@:2}"
;;
create)
create_droplet "${@:2}"
;;
get_id)
get_droplet_id "${@:2}"
;;
get_ip)
get_droplet_ip "${@:2}"
;;
power_on)
power_on_droplet "${@:2}"
;;
power_off)
power_off_droplet "${@:2}"
;;
snapshot)
snapshot_droplet "${@:2}"
;;
destroy)
destroy_droplet "${@:2}"
;;
transfer_image_to_all_regions)
transfer_image_to_all_regions "${@:2}"
;;
*)
echo "Unknown command $1"
exit 1
esac
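Every mutating call above follows the same pattern: POST an action, then poll the returned action id every 10 seconds until its status is completed. For illustration, a rough Node equivalent of that loop using only the core https module (the script authenticates with curl basic auth; the Bearer header below is an equivalent the DigitalOcean API also accepts):

```js
var https = require('https');

// poll GET /v2/droplets/:id/actions/:actionId until status is 'completed',
// mirroring the while/sleep loops in the shell functions above
function waitForAction(dropletId, actionId, callback) {
    var options = {
        hostname: 'api.digitalocean.com',
        path: '/v2/droplets/' + dropletId + '/actions/' + actionId,
        headers: { 'Authorization': 'Bearer ' + process.env.DIGITAL_OCEAN_TOKEN }
    };

    https.get(options, function (response) {
        var body = '';
        response.on('data', function (chunk) { body += chunk; });
        response.on('end', function () {
            if (JSON.parse(body).action.status === 'completed') return callback(null);
            setTimeout(function () { waitForAction(dropletId, actionId, callback); }, 10000);
        });
    }).on('error', callback);
}
```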

View File

@@ -0,0 +1,325 @@
#!/bin/bash
set -euv -o pipefail
readonly USER=yellowtent
readonly USER_HOME="/home/${USER}"
readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer"
readonly INSTALLER_REVISION="$1"
readonly SELFHOSTED=$(( $# > 1 ? 1 : 0 ))
readonly USER_DATA_FILE="/root/user_data.img"
readonly USER_DATA_DIR="/home/yellowtent/data"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
function die {
echo $1
exit 1
}
[[ "$(systemd --version 2>&1)" == *"systemd 225"* ]] || die "Expecting systemd to be 225"
if [ -f "${SOURCE_DIR}/INFRA_VERSION" ]; then
source "${SOURCE_DIR}/INFRA_VERSION"
else
echo "No INFRA_VERSION found, skip pulling docker images"
fi
if [ ${SELFHOSTED} == 0 ]; then
echo "!! Initializing Ubuntu image for CaaS"
else
echo "!! Initializing Ubuntu image for Selfhosting"
fi
echo "==== Create User ${USER} ===="
if ! id "${USER}"; then
useradd "${USER}" -m
fi
echo "=== Yellowtent base image preparation (installer revision - ${INSTALLER_REVISION}) ==="
echo "=== Prepare installer source ==="
rm -rf "${INSTALLER_SOURCE_DIR}" && mkdir -p "${INSTALLER_SOURCE_DIR}"
rm -rf /tmp/box && mkdir -p /tmp/box
tar xvf /tmp/box.tar.gz -C /tmp/box && rm /tmp/box.tar.gz
cp -rf /tmp/box/installer/* "${INSTALLER_SOURCE_DIR}"
echo "${INSTALLER_REVISION}" > "${INSTALLER_SOURCE_DIR}/REVISION"
export DEBIAN_FRONTEND=noninteractive
echo "=== Upgrade ==="
apt-get update
apt-get upgrade -y
apt-get install -y curl
# Setup firewall before everything. docker creates its own chain and the -X below will remove it
# Do NOT use iptables-persistent because its startup ordering conflicts with docker
echo "=== Setting up firewall ==="
# clear tables and set default policy
iptables -F # flush all chains
iptables -X # delete all chains
# default policy for filter table
iptables -P INPUT DROP
iptables -P FORWARD ACCEPT # TODO: disable icc and make this REJECT
iptables -P OUTPUT ACCEPT
# NOTE: keep these in sync with src/apps.js validatePortBindings
# allow ssh, http, https, ping, dns
iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
if [ ${SELFHOSTED} == 0 ]; then
iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,202,443,886 -j ACCEPT
else
iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,22,443,886 -j ACCEPT
fi
iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT
iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT
iptables -A INPUT -p udp --sport 53 -j ACCEPT
iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
# loopback
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# prevent DoS
# iptables -A INPUT -p tcp --dport 80 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT
# log dropped incoming. keep this at the end of all the rules
iptables -N LOGGING # new chain
iptables -A INPUT -j LOGGING # last rule in INPUT chain
iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
iptables -A LOGGING -j DROP
echo "==== Install btrfs tools ==="
apt-get -y install btrfs-tools
echo "==== Install docker ===="
# install docker from binary to pin it to a specific version. the current debian repo does not allow pinning
curl https://get.docker.com/builds/Linux/x86_64/docker-1.9.1 > /usr/bin/docker
chmod +x /usr/bin/docker
groupadd docker
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
PartOf=docker.service
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
After=network.target docker.socket
Requires=docker.socket
[Service]
ExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
echo "=== Setup btrfs data ==="
fallocate -l "8192m" "${USER_DATA_FILE}" # 8gb start (this will get resized dynamically by box-setup.service)
mkfs.btrfs -L UserHome "${USER_DATA_FILE}"
echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab
mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}"
systemctl daemon-reload
systemctl enable docker
systemctl start docker
# give docker some time to start up and create iptables rules
# those rules come in after docker has started, and we want to wait for them to be sure iptables-save has all of them
sleep 10
# Disable forwarding to metadata route from containers
iptables -I FORWARD -d 169.254.169.254 -j DROP
# ubuntu will restore iptables from this file automatically. this is here so that docker's chain is saved to this file
mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4
echo "=== Enable memory accounting =="
sed -e 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
update-grub
# now add the user to the docker group
usermod "${USER}" -a -G docker
if [ -z "${INFRA_VERSION:-}" ]; then
echo "Skip pulling base docker images"
else
echo "=== Pulling base docker images ==="
docker pull "${BASE_IMAGE}"
echo "=== Pulling mysql addon image ==="
docker pull "${MYSQL_IMAGE}"
echo "=== Pulling postgresql addon image ==="
docker pull "${POSTGRESQL_IMAGE}"
echo "=== Pulling redis addon image ==="
docker pull "${REDIS_IMAGE}"
echo "=== Pulling mongodb addon image ==="
docker pull "${MONGODB_IMAGE}"
echo "=== Pulling graphite docker images ==="
docker pull "${GRAPHITE_IMAGE}"
echo "=== Pulling mail relay ==="
docker pull "${MAIL_IMAGE}"
fi
echo "==== Install nginx ===="
apt-get -y install nginx-full
[[ "$(nginx -v 2>&1)" == *"nginx/1.9."* ]] || die "Expecting nginx version to be 1.9.x"
echo "==== Install build-essential ===="
apt-get -y install build-essential rcconf
echo "==== Install mysql ===="
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
apt-get -y install mysql-server
[[ "$(mysqld --version 2>&1)" == *"5.6."* ]] || die "Expecting nginx version to be 5.6.x"
echo "==== Install pwgen ===="
apt-get -y install pwgen
echo "==== Install collectd ==="
if ! apt-get install -y collectd collectd-utils; then
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
fi
update-rc.d -f collectd remove
# this simply makes it explicit that we run logrotate via cron. it's already part of base ubuntu
echo "==== Install logrotate ==="
apt-get install -y cron logrotate
systemctl enable cron
echo "==== Install nodejs ===="
# Cannot use anything above 4.1.1 - https://github.com/nodejs/node/issues/3803
mkdir -p /usr/local/node-4.1.1
curl -sL https://nodejs.org/dist/v4.1.1/node-v4.1.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.1.1
ln -s /usr/local/node-4.1.1/bin/node /usr/bin/node
ln -s /usr/local/node-4.1.1/bin/npm /usr/bin/npm
apt-get install -y python # Install python which is required for npm rebuild
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
echo "=== Rebuilding npm packages ==="
cd "${INSTALLER_SOURCE_DIR}" && npm install --production
chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}"
echo "==== Install installer systemd script ===="
provisionEnv="PROVISION=digitalocean"
if [ ${SELFHOSTED} == 1 ]; then
provisionEnv="PROVISION=local"
fi
cat > /etc/systemd/system/cloudron-installer.service <<EOF
[Unit]
Description=Cloudron Installer
; journald crashes result in an EPIPE in node. Cannot ignore it as it results in loss of logs.
BindsTo=systemd-journald.service
[Service]
Type=idle
ExecStart="${INSTALLER_SOURCE_DIR}/src/server.js"
Environment="DEBUG=installer*,connect-lastmile" ${provisionEnv}
; kill any child (installer.sh) as well
KillMode=control-group
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Restore iptables before docker
echo "==== Install iptables-restore systemd script ===="
cat > /etc/systemd/system/iptables-restore.service <<EOF
[Unit]
Description=IPTables Restore
Before=docker.service
[Service]
Type=oneshot
ExecStart=/sbin/iptables-restore /etc/iptables/rules.v4
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
EOF
# Allocate swap files
# https://bbs.archlinux.org/viewtopic.php?id=194792 ensures this runs after do-resize.service
echo "==== Install box-setup systemd script ===="
cat > /etc/systemd/system/box-setup.service <<EOF
[Unit]
Description=Box Setup
Before=docker.service umount.target collectd.service
After=do-resize.service
[Service]
Type=oneshot
ExecStart="${INSTALLER_SOURCE_DIR}/systemd/box-setup.sh"
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable cloudron-installer
systemctl enable iptables-restore
systemctl enable box-setup
# Configure systemd
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
-e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
-i /etc/systemd/journald.conf
# When rotating logs, systemd kills journald too soon sometimes
# See https://github.com/systemd/systemd/issues/1353 (this is upstream default)
sed -e "s/^WatchdogSec=.*$/WatchdogSec=3min/" \
-i /lib/systemd/system/systemd-journald.service
sync
# Configure time
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
timedatectl set-ntp 1
timedatectl set-timezone UTC
# Give user access to system logs
apt-get -y install acl
usermod -a -G systemd-journal ${USER}
mkdir -p /var/log/journal # in some images, this directory is not created, making systemd log to /run/log/journal instead
chown root:systemd-journal /var/log/journal
systemctl restart systemd-journald
setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal
if [ ${SELFHOSTED} == 0 ]; then
echo "==== Install ssh ==="
apt-get -y install openssh-server
# https://stackoverflow.com/questions/4348166/using-with-sed on why ? must be escaped
sed -e 's/^#\?Port .*/Port 202/g' \
-e 's/^#\?PermitRootLogin .*/PermitRootLogin without-password/g' \
-e 's/^#\?PermitEmptyPasswords .*/PermitEmptyPasswords no/g' \
-e 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/g' \
-i /etc/ssh/sshd_config
# required so we can connect to this machine since port 22 is blocked by iptables by now
systemctl reload sshd
fi

119
box.js
View File

@@ -2,76 +2,63 @@
'use strict';
const fs = require('fs'),
require('supererror')({ splatchError: true });
// remove timestamp from debug() based output
require('debug').formatArgs = function formatArgs() {
arguments[0] = this.namespace + ' ' + arguments[0];
return arguments;
};
var appHealthMonitor = require('./src/apphealthmonitor.js'),
async = require('async'),
config = require('./src/config.js'),
ldap = require('./src/ldap.js'),
paths = require('./src/paths.js'),
proxyAuth = require('./src/proxyauth.js'),
safe = require('safetydance'),
oauthproxy = require('./src/oauthproxy.js'),
server = require('./src/server.js'),
settings = require('./src/settings.js'),
userdirectory = require('./src/userdirectory.js');
simpleauth = require('./src/simpleauth.js');
let logFd;
console.log();
console.log('==========================================');
console.log(' Cloudron will use the following settings ');
console.log('==========================================');
console.log();
console.log(' Environment: ', config.CLOUDRON ? 'CLOUDRON' : 'TEST');
console.log(' Version: ', config.version());
console.log(' Admin Origin: ', config.adminOrigin());
console.log(' Appstore API server origin: ', config.apiServerOrigin());
console.log(' Appstore Web server origin: ', config.webServerOrigin());
console.log();
console.log('==========================================');
console.log();
async function setupLogging() {
if (process.env.BOX_ENV === 'test') return;
async.series([
server.start,
ldap.start,
simpleauth.start,
appHealthMonitor.start,
oauthproxy.start
], function (error) {
if (error) {
console.error('Error starting server', error);
process.exit(1);
}
});
logFd = fs.openSync(paths.BOX_LOG_FILE, 'a');
// we used to write using a stream before but it caches internally and there is no way to flush it when things crash
process.stdout.write = process.stderr.write = function (...args) {
const callback = typeof args[args.length-1] === 'function' ? args.pop() : function () {}; // callback is required for fs.write
fs.write.apply(fs, [logFd, ...args, callback]);
};
}
var NOOP_CALLBACK = function () { };
// this is also used as the 'uncaughtException' handler which can only have synchronous functions
function exitSync(status) {
if (status.error) fs.write(logFd, status.error.stack + '\n', function () {});
fs.fsyncSync(logFd);
fs.closeSync(logFd);
process.exit(status.code);
}
process.on('SIGINT', function () {
server.stop(NOOP_CALLBACK);
ldap.stop(NOOP_CALLBACK);
simpleauth.stop(NOOP_CALLBACK);
oauthproxy.stop(NOOP_CALLBACK);
setTimeout(process.exit.bind(process), 3000);
});
async function startServers() {
await setupLogging();
await server.start(); // do this first since it also inits the database
await proxyAuth.start();
await ldap.start();
const conf = await settings.getUserDirectoryConfig();
if (conf.enabled) await userdirectory.start();
}
async function main() {
const [error] = await safe(startServers());
if (error) return exitSync({ error: new Error(`Error starting server: ${JSON.stringify(error)}`), code: 1 });
// require this here so that logging handler is already setup
const debug = require('debug')('box:box');
process.on('SIGINT', async function () {
debug('Received SIGINT. Shutting down.');
await proxyAuth.stop();
await server.stop();
await userdirectory.stop();
await ldap.stop();
setTimeout(process.exit.bind(process), 3000);
});
process.on('SIGTERM', async function () {
debug('Received SIGTERM. Shutting down.');
await proxyAuth.stop();
await server.stop();
await userdirectory.stop();
await ldap.stop();
setTimeout(process.exit.bind(process), 3000);
});
process.on('uncaughtException', (error) => exitSync({ error, code: 1 }));
console.log(`Cloudron is up and running. Logs are at ${paths.BOX_LOG_FILE}`); // this goes to journalctl
}
main();
process.on('SIGTERM', function () {
server.stop(NOOP_CALLBACK);
ldap.stop(NOOP_CALLBACK);
simpleauth.stop(NOOP_CALLBACK);
oauthproxy.stop(NOOP_CALLBACK);
setTimeout(process.exit.bind(process), 3000);
});
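The rewritten box.js above swaps process.stdout.write for a direct fs.write to a log file descriptor, so the synchronous uncaughtException handler can still fsync and flush pending output before exiting. A stripped-down sketch of just that pattern (the log path is a placeholder; box.js uses paths.BOX_LOG_FILE):

```js
var fs = require('fs');

var logFd = fs.openSync('/tmp/box.log', 'a'); // placeholder path

// a stream's write() buffers internally and cannot be flushed when things
// crash; writing straight to the fd lets exitSync() fsync what was written
process.stdout.write = process.stderr.write = function (...args) {
    var callback = typeof args[args.length - 1] === 'function' ? args.pop() : function () {}; // fs.write requires a callback
    fs.write(logFd, ...args, callback);
};

// only synchronous calls are safe in an 'uncaughtException' handler
function exitSync(code) {
    fs.fsyncSync(logFd);
    fs.closeSync(logFd);
    process.exit(code);
}

process.on('uncaughtException', function (error) {
    fs.writeSync(logFd, error.stack + '\n');
    exitSync(1);
});
```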

42
crashnotifier.js Executable file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/env node
'use strict';
var assert = require('assert'),
mailer = require('./src/mailer.js'),
safe = require('safetydance'),
path = require('path'),
util = require('util');
var COLLECT_LOGS_CMD = path.join(__dirname, 'src/scripts/collectlogs.sh');
function collectLogs(program, callback) {
assert.strictEqual(typeof program, 'string');
assert.strictEqual(typeof callback, 'function');
var logs = safe.child_process.execSync('sudo ' + COLLECT_LOGS_CMD + ' ' + program, { encoding: 'utf8' });
callback(null, logs);
}
function sendCrashNotification(processName) {
collectLogs(processName, function (error, result) {
if (error) {
console.error('Failed to collect logs.', error);
result = util.format('Failed to collect logs.', error);
}
console.log('Sending crash notification email for', processName);
mailer.sendCrashNotification(processName, result);
});
}
function main() {
if (process.argv.length !== 3) return console.error('Usage: crashnotifier.js <processName>');
var processName = process.argv[2];
console.log('Started crash notifier for', processName);
sendCrashNotification(processName);
}
main();

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env node
'use strict';
const database = require('./src/database.js');
const crashNotifier = require('./src/crashnotifier.js');
// This is triggered by systemd with the crashed unit name as argument
async function main() {
if (process.argv.length !== 3) return console.error('Usage: crashnotifier.js <unitName>');
const unitName = process.argv[2];
console.log('Started crash notifier for', unitName);
// eventlog api needs the db
await database.initialize();
await crashNotifier.sendFailureLogs(unitName);
}
main();

5
docs/makeDocs Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -eu
./node_modules/.bin/apidoc -i src/routes -o docs

155
gulpfile.js Normal file
View File

@@ -0,0 +1,155 @@
/* jslint node:true */
'use strict';
var ejs = require('gulp-ejs'),
gulp = require('gulp'),
del = require('del'),
concat = require('gulp-concat'),
uglify = require('gulp-uglify'),
serve = require('gulp-serve'),
sass = require('gulp-sass'),
sourcemaps = require('gulp-sourcemaps'),
cssnano = require('gulp-cssnano'),
autoprefixer = require('gulp-autoprefixer'),
argv = require('yargs').argv;
gulp.task('3rdparty', function () {
gulp.src([
'webadmin/src/3rdparty/**/*.js',
'webadmin/src/3rdparty/**/*.map',
'webadmin/src/3rdparty/**/*.css',
'webadmin/src/3rdparty/**/*.otf',
'webadmin/src/3rdparty/**/*.eot',
'webadmin/src/3rdparty/**/*.svg',
'webadmin/src/3rdparty/**/*.ttf',
'webadmin/src/3rdparty/**/*.woff',
'webadmin/src/3rdparty/**/*.woff2'
])
.pipe(gulp.dest('webadmin/dist/3rdparty/'))
.pipe(gulp.dest('setup/splash/website/3rdparty'));
gulp.src('node_modules/bootstrap-sass/assets/javascripts/bootstrap.min.js')
.pipe(gulp.dest('webadmin/dist/3rdparty/js'))
.pipe(gulp.dest('setup/splash/website/3rdparty/js'));
});
// --------------
// JavaScript
// --------------
gulp.task('js', ['js-index', 'js-setup', 'js-update'], function () {});
var oauth = {
clientId: argv.clientId || 'cid-webadmin',
clientSecret: argv.clientSecret || 'unused',
apiOrigin: argv.apiOrigin || ''
};
console.log();
console.log('Using OAuth credentials:');
console.log(' ClientId: %s', oauth.clientId);
console.log(' ClientSecret: %s', oauth.clientSecret);
console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
console.log();
gulp.task('js-index', function () {
gulp.src([
'webadmin/src/js/index.js',
'webadmin/src/js/client.js',
'webadmin/src/js/appstore.js',
'webadmin/src/js/main.js',
'webadmin/src/views/*.js'
])
.pipe(ejs({ oauth: oauth }, { ext: '.js' }))
.pipe(sourcemaps.init())
.pipe(concat('index.js', { newLine: ';' }))
.pipe(uglify())
.pipe(sourcemaps.write())
.pipe(gulp.dest('webadmin/dist/js'));
});
gulp.task('js-setup', function () {
gulp.src(['webadmin/src/js/setup.js', 'webadmin/src/js/client.js'])
.pipe(ejs({ oauth: oauth }, { ext: '.js' }))
.pipe(sourcemaps.init())
.pipe(concat('setup.js', { newLine: ';' }))
.pipe(uglify())
.pipe(sourcemaps.write())
.pipe(gulp.dest('webadmin/dist/js'));
});
gulp.task('js-update', function () {
gulp.src(['webadmin/src/js/update.js'])
.pipe(sourcemaps.init())
.pipe(uglify())
.pipe(sourcemaps.write())
.pipe(gulp.dest('webadmin/dist/js'))
.pipe(gulp.dest('setup/splash/website/js'));
});
// --------------
// HTML
// --------------
gulp.task('html', ['html-views', 'html-update', 'html-templates'], function () {
return gulp.src('webadmin/src/*.html').pipe(gulp.dest('webadmin/dist'));
});
gulp.task('html-update', function () {
return gulp.src(['webadmin/src/update.html']).pipe(gulp.dest('setup/splash/website'));
});
gulp.task('html-views', function () {
return gulp.src('webadmin/src/views/**/*.html').pipe(gulp.dest('webadmin/dist/views'));
});
gulp.task('html-templates', function () {
return gulp.src('webadmin/src/templates/**/*.html').pipe(gulp.dest('webadmin/dist/templates'));
});
// --------------
// CSS
// --------------
gulp.task('css', function () {
return gulp.src('webadmin/src/*.scss')
.pipe(sourcemaps.init())
.pipe(sass({ includePaths: ['node_modules/bootstrap-sass/assets/stylesheets/'] }).on('error', sass.logError))
.pipe(autoprefixer())
.pipe(cssnano())
.pipe(sourcemaps.write())
.pipe(gulp.dest('webadmin/dist'))
.pipe(gulp.dest('setup/splash/website'));
});
gulp.task('images', function () {
return gulp.src('webadmin/src/img/**')
.pipe(gulp.dest('webadmin/dist/img'));
});
// --------------
// Utilities
// --------------
gulp.task('watch', ['default'], function () {
gulp.watch(['webadmin/src/*.scss'], ['css']);
gulp.watch(['webadmin/src/img/*'], ['images']);
gulp.watch(['webadmin/src/**/*.html'], ['html']);
gulp.watch(['webadmin/src/views/*.html'], ['html-views']);
gulp.watch(['webadmin/src/templates/*.html'], ['html-templates']);
gulp.watch(['webadmin/src/js/update.js'], ['js-update']);
gulp.watch(['webadmin/src/js/setup.js', 'webadmin/src/js/client.js'], ['js-setup']);
gulp.watch(['webadmin/src/js/index.js', 'webadmin/src/js/client.js', 'webadmin/src/js/appstore.js', 'webadmin/src/js/main.js', 'webadmin/src/views/*.js'], ['js-index']);
gulp.watch(['webadmin/src/3rdparty/**/*'], ['3rdparty']);
});
gulp.task('clean', function () {
del.sync(['webadmin/dist', 'setup/splash/website']);
});
gulp.task('default', ['clean', 'html', 'js', '3rdparty', 'images', 'css'], function () {});
gulp.task('develop', ['watch'], serve({ root: 'webadmin/dist', port: 4000 }));

164
installer.sh.ejs Executable file
View File

@@ -0,0 +1,164 @@
#!/bin/bash
set -eu -o pipefail
echo ""
echo "======== Cloudron Installer ========"
echo ""
if [ $# -lt 6 ]; then
echo "Usage: ./installer.sh <fqdn> <aws key id> <aws key secret> <bucket> <provider> <revision>"
exit 1
fi
# commandline arguments
readonly fqdn="${1}"
readonly aws_access_key_id="${2}"
readonly aws_access_key_secret="${3}"
readonly aws_backup_bucket="${4}"
readonly provider="${5}"
readonly revision="${6}"
# environment specific urls
<% if (env === 'prod') { %>
readonly api_server_origin="https://api.cloudron.io"
readonly web_server_origin="https://cloudron.io"
<% } else { %>
readonly api_server_origin="https://api.<%= env %>.cloudron.io"
readonly web_server_origin="https://<%= env %>.cloudron.io"
<% } %>
readonly release_bucket_url="https://s3.amazonaws.com/<%= env %>-cloudron-releases"
readonly versions_url="https://s3.amazonaws.com/<%= env %>-cloudron-releases/versions.json"
readonly installer_code_url="${release_bucket_url}/box-${revision}.tar.gz"
# runtime consts
readonly installer_code_file="/tmp/box.tar.gz"
readonly installer_tmp_dir="/tmp/box"
readonly cert_folder="/tmp/certificates"
# check for fqdn in /etc/hosts
echo "[INFO] checking for hostname entry"
readonly hostentry_found=$(grep "${fqdn}" /etc/hosts || true)
if [[ -z $hostentry_found ]]; then
echo "[WARNING] No entry for ${fqdn} found in /etc/hosts"
echo "Adding an entry ..."
cat >> /etc/hosts <<EOF
# The following line was added by the Cloudron installer script
127.0.1.1 ${fqdn} ${fqdn}
EOF
else
echo "Valid hostname entry found in /etc/hosts"
fi
echo ""
echo "[INFO] ensure minimal dependencies ..."
apt-get update
apt-get install -y curl
echo ""
echo "[INFO] Generating certificates ..."
rm -rf "${cert_folder}"
mkdir -p "${cert_folder}"
cat > "${cert_folder}/CONFIG" <<EOF
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
prompt = no
req_extensions = v3_req
[ req_distinguished_name ]
C = DE
ST = Berlin
L = Berlin
O = Cloudron UG
OU = Cloudron
CN = ${fqdn}
emailAddress = cert@cloudron.io
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${fqdn}
DNS.2 = *.${fqdn}
EOF
# generate cert files
openssl genrsa 2048 > "${cert_folder}/host.key"
openssl req -new -out "${cert_folder}/host.csr" -key "${cert_folder}/host.key" -config "${cert_folder}/CONFIG"
openssl x509 -req -days 3650 -in "${cert_folder}/host.csr" -signkey "${cert_folder}/host.key" -out "${cert_folder}/host.cert" -extensions v3_req -extfile "${cert_folder}/CONFIG"
# make them json compatible, by collapsing to one line
tls_cert=$(sed ':a;N;$!ba;s/\n/\\n/g' "${cert_folder}/host.cert")
tls_key=$(sed ':a;N;$!ba;s/\n/\\n/g' "${cert_folder}/host.key")
echo ""
echo "[INFO] Fetching installer code ..."
curl "${installer_code_url}" -o "${installer_code_file}"
echo ""
echo "[INFO] Extracting installer code to ${installer_tmp_dir} ..."
rm -rf "${installer_tmp_dir}" && mkdir -p "${installer_tmp_dir}"
tar xvf "${installer_code_file}" -C "${installer_tmp_dir}"
echo ""
echo "Creating initial provisioning config ..."
cat > /root/provision.json <<EOF
{
"sourceTarballUrl": "",
"data": {
"apiServerOrigin": "${api_server_origin}",
"webServerOrigin": "${web_server_origin}",
"fqdn": "${fqdn}",
"token": "",
"isCustomDomain": true,
"boxVersionsUrl": "${versions_url}",
"version": "",
"tlsCert": "${tls_cert}",
"tlsKey": "${tls_key}",
"provider": "${provider}",
"backupConfig": {
"provider": "s3",
"accessKeyId": "${aws_access_key_id}",
"secretAccessKey": "${aws_access_key_secret}",
"bucket": "${aws_backup_bucket}",
"prefix": "backups"
},
"dnsConfig": {
"provider": "route53",
"accessKeyId": "${aws_access_key_id}",
"secretAccessKey": "${aws_access_key_secret}"
},
"tlsConfig": {
"provider": "letsencrypt-<%= env %>"
}
}
}
EOF
echo "[INFO] Running Ubuntu initializing script ..."
/bin/bash "${installer_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "${revision}" selfhosting
echo ""
echo "[INFO] Reloading systemd daemon ..."
systemctl daemon-reload
echo ""
echo "[INFO] Restart docker ..."
systemctl restart docker
echo ""
echo "[FINISHED] Now starting Cloudron init jobs ..."
systemctl start box-setup
# TODO: this is only for convenience; we should probably just let the user do a restart
sleep 5 && sync
systemctl start cloudron-installer
journalctl -u cloudron-installer.service -f
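The sed one-liners above collapse the PEM files to single lines so they can be interpolated into provision.json. For comparison, a small Node sketch that produces the same result by letting JSON.stringify escape the newlines (paths match the script's temp locations):

```js
var fs = require('fs');

// read the freshly generated certificate and key, newlines and all
var tlsCert = fs.readFileSync('/tmp/certificates/host.cert', 'utf8');
var tlsKey = fs.readFileSync('/tmp/certificates/host.key', 'utf8');

// JSON.stringify escapes the embedded newlines, so no sed trickery is needed
var provision = { data: { tlsCert: tlsCert, tlsKey: tlsKey } };
fs.writeFileSync('/root/provision.json', JSON.stringify(provision, null, 4));
```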

516
installer/npm-shrinkwrap.json generated Normal file
View File

@@ -0,0 +1,516 @@
{
"name": "installer",
"version": "0.0.1",
"dependencies": {
"async": {
"version": "1.5.0",
"from": "async@>=1.5.0 <2.0.0",
"resolved": "https://registry.npmjs.org/async/-/async-1.5.0.tgz"
},
"body-parser": {
"version": "1.14.1",
"from": "body-parser@>=1.12.0 <2.0.0",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.14.1.tgz",
"dependencies": {
"bytes": {
"version": "2.1.0",
"from": "bytes@2.1.0",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-2.1.0.tgz"
},
"content-type": {
"version": "1.0.1",
"from": "content-type@>=1.0.1 <1.1.0",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz"
},
"depd": {
"version": "1.1.0",
"from": "depd@>=1.1.0 <1.2.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.0.tgz"
},
"http-errors": {
"version": "1.3.1",
"from": "http-errors@>=1.3.1 <1.4.0",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.3.1.tgz",
"dependencies": {
"inherits": {
"version": "2.0.1",
"from": "inherits@>=2.0.1 <2.1.0",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz"
},
"statuses": {
"version": "1.2.1",
"from": "statuses@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.2.1.tgz"
}
}
},
"iconv-lite": {
"version": "0.4.12",
"from": "iconv-lite@0.4.12",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.12.tgz"
},
"on-finished": {
"version": "2.3.0",
"from": "on-finished@>=2.3.0 <2.4.0",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
"dependencies": {
"ee-first": {
"version": "1.1.1",
"from": "ee-first@1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz"
}
}
},
"qs": {
"version": "5.1.0",
"from": "qs@5.1.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-5.1.0.tgz"
},
"raw-body": {
"version": "2.1.4",
"from": "raw-body@>=2.1.4 <2.2.0",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.1.4.tgz",
"dependencies": {
"unpipe": {
"version": "1.0.0",
"from": "unpipe@1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz"
}
}
},
"type-is": {
"version": "1.6.9",
"from": "type-is@>=1.6.9 <1.7.0",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.9.tgz",
"dependencies": {
"media-typer": {
"version": "0.3.0",
"from": "media-typer@0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz"
},
"mime-types": {
"version": "2.1.7",
"from": "mime-types@>=2.1.7 <2.2.0",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz",
"dependencies": {
"mime-db": {
"version": "1.19.0",
"from": "mime-db@>=1.19.0 <1.20.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz"
}
}
}
}
}
}
},
"connect-lastmile": {
"version": "0.0.13",
"from": "connect-lastmile@0.0.13",
"resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.13.tgz",
"dependencies": {
"debug": {
"version": "2.1.3",
"from": "debug@>=2.1.0 <2.2.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz",
"dependencies": {
"ms": {
"version": "0.7.0",
"from": "ms@0.7.0",
"resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz"
}
}
}
}
},
"debug": {
"version": "2.2.0",
"from": "debug@>=2.1.1 <3.0.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.2.0.tgz",
"dependencies": {
"ms": {
"version": "0.7.1",
"from": "ms@0.7.1",
"resolved": "https://registry.npmjs.org/ms/-/ms-0.7.1.tgz"
}
}
},
"express": {
"version": "4.13.3",
"from": "express@>=4.11.2 <5.0.0",
"resolved": "https://registry.npmjs.org/express/-/express-4.13.3.tgz",
"dependencies": {
"accepts": {
"version": "1.2.13",
"from": "accepts@>=1.2.12 <1.3.0",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.2.13.tgz",
"dependencies": {
"mime-types": {
"version": "2.1.7",
"from": "mime-types@>=2.1.6 <2.2.0",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz",
"dependencies": {
"mime-db": {
"version": "1.19.0",
"from": "mime-db@>=1.19.0 <1.20.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz"
}
}
},
"negotiator": {
"version": "0.5.3",
"from": "negotiator@0.5.3",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.3.tgz"
}
}
},
"array-flatten": {
"version": "1.1.1",
"from": "array-flatten@1.1.1",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz"
},
"content-disposition": {
"version": "0.5.0",
"from": "content-disposition@0.5.0",
"resolved": "http://registry.npmjs.org/content-disposition/-/content-disposition-0.5.0.tgz"
},
"content-type": {
"version": "1.0.1",
"from": "content-type@>=1.0.1 <1.1.0",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz"
},
"cookie": {
"version": "0.1.3",
"from": "cookie@0.1.3",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.1.3.tgz"
},
"cookie-signature": {
"version": "1.0.6",
"from": "cookie-signature@1.0.6",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz"
},
"depd": {
"version": "1.0.1",
"from": "depd@>=1.0.1 <1.1.0",
"resolved": "http://registry.npmjs.org/depd/-/depd-1.0.1.tgz"
},
"escape-html": {
"version": "1.0.2",
"from": "escape-html@1.0.2",
"resolved": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.2.tgz"
},
"etag": {
"version": "1.7.0",
"from": "etag@>=1.7.0 <1.8.0",
"resolved": "https://registry.npmjs.org/etag/-/etag-1.7.0.tgz"
},
"finalhandler": {
"version": "0.4.0",
"from": "finalhandler@0.4.0",
"resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.4.0.tgz",
"dependencies": {
"unpipe": {
"version": "1.0.0",
"from": "unpipe@>=1.0.0 <1.1.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz"
}
}
},
"fresh": {
"version": "0.3.0",
"from": "fresh@0.3.0",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.3.0.tgz"
},
"merge-descriptors": {
"version": "1.0.0",
"from": "merge-descriptors@1.0.0",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz"
},
"methods": {
"version": "1.1.1",
"from": "methods@>=1.1.1 <1.2.0",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.1.tgz"
},
"on-finished": {
"version": "2.3.0",
"from": "on-finished@>=2.3.0 <2.4.0",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
"dependencies": {
"ee-first": {
"version": "1.1.1",
"from": "ee-first@1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz"
}
}
},
"parseurl": {
"version": "1.3.0",
"from": "parseurl@>=1.3.0 <1.4.0",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz"
},
"path-to-regexp": {
"version": "0.1.7",
"from": "path-to-regexp@0.1.7",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz"
},
"proxy-addr": {
"version": "1.0.8",
"from": "proxy-addr@>=1.0.8 <1.1.0",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.8.tgz",
"dependencies": {
"forwarded": {
"version": "0.1.0",
"from": "forwarded@>=0.1.0 <0.2.0",
"resolved": "http://registry.npmjs.org/forwarded/-/forwarded-0.1.0.tgz"
},
"ipaddr.js": {
"version": "1.0.1",
"from": "ipaddr.js@1.0.1",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.0.1.tgz"
}
}
},
"qs": {
"version": "4.0.0",
"from": "qs@4.0.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-4.0.0.tgz"
},
"range-parser": {
"version": "1.0.3",
"from": "range-parser@>=1.0.2 <1.1.0",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.0.3.tgz"
},
"send": {
"version": "0.13.0",
"from": "send@0.13.0",
"resolved": "http://registry.npmjs.org/send/-/send-0.13.0.tgz",
"dependencies": {
"destroy": {
"version": "1.0.3",
"from": "destroy@1.0.3",
"resolved": "http://registry.npmjs.org/destroy/-/destroy-1.0.3.tgz"
},
"http-errors": {
"version": "1.3.1",
"from": "http-errors@>=1.3.1 <1.4.0",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.3.1.tgz",
"dependencies": {
"inherits": {
"version": "2.0.1",
"from": "inherits@>=2.0.1 <2.1.0",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz"
}
}
},
"mime": {
"version": "1.3.4",
"from": "mime@1.3.4",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.3.4.tgz"
},
"ms": {
"version": "0.7.1",
"from": "ms@0.7.1",
"resolved": "https://registry.npmjs.org/ms/-/ms-0.7.1.tgz"
},
"statuses": {
"version": "1.2.1",
"from": "statuses@>=1.2.1 <1.3.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.2.1.tgz"
}
}
},
"serve-static": {
"version": "1.10.0",
"from": "serve-static@>=1.10.0 <1.11.0",
"resolved": "http://registry.npmjs.org/serve-static/-/serve-static-1.10.0.tgz"
},
"type-is": {
"version": "1.6.9",
"from": "type-is@>=1.6.9 <1.7.0",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.9.tgz",
"dependencies": {
"media-typer": {
"version": "0.3.0",
"from": "media-typer@0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz"
},
"mime-types": {
"version": "2.1.7",
"from": "mime-types@>=2.1.6 <2.2.0",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz",
"dependencies": {
"mime-db": {
"version": "1.19.0",
"from": "mime-db@>=1.19.0 <1.20.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz"
}
}
}
}
},
"utils-merge": {
"version": "1.0.0",
"from": "utils-merge@1.0.0",
"resolved": "http://registry.npmjs.org/utils-merge/-/utils-merge-1.0.0.tgz"
},
"vary": {
"version": "1.0.1",
"from": "vary@>=1.0.1 <1.1.0",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.0.1.tgz"
}
}
},
"json": {
"version": "9.0.3",
"from": "json@>=9.0.3 <10.0.0",
"resolved": "https://registry.npmjs.org/json/-/json-9.0.3.tgz"
},
"morgan": {
"version": "1.6.1",
"from": "morgan@>=1.5.1 <2.0.0",
"resolved": "https://registry.npmjs.org/morgan/-/morgan-1.6.1.tgz",
"dependencies": {
"basic-auth": {
"version": "1.0.3",
"from": "basic-auth@>=1.0.3 <1.1.0",
"resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-1.0.3.tgz"
},
"depd": {
"version": "1.0.1",
"from": "depd@>=1.0.1 <1.1.0",
"resolved": "http://registry.npmjs.org/depd/-/depd-1.0.1.tgz"
},
"on-finished": {
"version": "2.3.0",
"from": "on-finished@>=2.3.0 <2.4.0",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
"dependencies": {
"ee-first": {
"version": "1.1.1",
"from": "ee-first@1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz"
}
}
},
"on-headers": {
"version": "1.0.1",
"from": "on-headers@>=1.0.0 <1.1.0",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz"
}
}
},
"proxy-middleware": {
"version": "0.15.0",
"from": "proxy-middleware@>=0.15.0 <0.16.0",
"resolved": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.15.0.tgz"
},
"safetydance": {
"version": "0.0.19",
"from": "safetydance@0.0.19",
"resolved": "https://registry.npmjs.org/safetydance/-/safetydance-0.0.19.tgz"
},
"semver": {
"version": "5.1.0",
"from": "semver@>=5.1.0 <6.0.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.1.0.tgz"
},
"superagent": {
"version": "0.21.0",
"from": "superagent@>=0.21.0 <0.22.0",
"resolved": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz",
"dependencies": {
"component-emitter": {
"version": "1.1.2",
"from": "component-emitter@1.1.2",
"resolved": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz"
},
"cookiejar": {
"version": "2.0.1",
"from": "cookiejar@2.0.1",
"resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz"
},
"extend": {
"version": "1.2.1",
"from": "extend@>=1.2.1 <1.3.0",
"resolved": "https://registry.npmjs.org/extend/-/extend-1.2.1.tgz"
},
"form-data": {
"version": "0.1.3",
"from": "form-data@0.1.3",
"resolved": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz",
"dependencies": {
"async": {
"version": "0.9.2",
"from": "async@>=0.9.0 <0.10.0",
"resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz"
},
"combined-stream": {
"version": "0.0.7",
"from": "combined-stream@>=0.0.4 <0.1.0",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz",
"dependencies": {
"delayed-stream": {
"version": "0.0.5",
"from": "delayed-stream@0.0.5",
"resolved": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz"
}
}
}
}
},
"formidable": {
"version": "1.0.14",
"from": "formidable@1.0.14",
"resolved": "https://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz"
},
"methods": {
"version": "1.0.1",
"from": "methods@1.0.1",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz"
},
"mime": {
"version": "1.2.11",
"from": "mime@1.2.11",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz"
},
"qs": {
"version": "1.2.0",
"from": "qs@1.2.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz"
},
"readable-stream": {
"version": "1.0.27-1",
"from": "readable-stream@1.0.27-1",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.27-1.tgz",
"dependencies": {
"core-util-is": {
"version": "1.0.1",
"from": "core-util-is@>=1.0.0 <1.1.0",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz"
},
"inherits": {
"version": "2.0.1",
"from": "inherits@>=2.0.1 <2.1.0",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz"
},
"isarray": {
"version": "0.0.1",
"from": "isarray@0.0.1",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz"
},
"string_decoder": {
"version": "0.10.31",
"from": "string_decoder@>=0.10.0 <0.11.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz"
}
}
},
"reduce-component": {
"version": "1.0.1",
"from": "reduce-component@1.0.1",
"resolved": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz"
}
}
}
}
}

47
installer/package.json Normal file
View File

@@ -0,0 +1,47 @@
{
"name": "installer",
"description": "Cloudron Installer",
"version": "0.0.1",
"private": "true",
"author": {
"name": "Cloudron authors"
},
"repository": {
"type": "git"
},
"engines": [
"node >=4.0.0 <=4.1.1"
],
"dependencies": {
"async": "^1.5.0",
"body-parser": "^1.12.0",
"connect-lastmile": "0.0.13",
"debug": "^2.1.1",
"express": "^4.11.2",
"json": "^9.0.3",
"morgan": "^1.5.1",
"proxy-middleware": "^0.15.0",
"safetydance": "0.0.19",
"semver": "^5.1.0",
"superagent": "^0.21.0"
},
"devDependencies": {
"colors": "^1.1.2",
"commander": "^2.8.1",
"expect.js": "^0.3.1",
"istanbul": "^0.3.5",
"lodash": "^3.2.0",
"mocha": "^2.1.0",
"nock": "^0.59.1",
"sleep": "^3.0.0",
"superagent-sync": "^0.2.0",
"supererror": "^0.7.0",
"yesno": "0.0.1"
},
"scripts": {
"test": "NODE_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test",
"precommit": "/bin/true",
"prepush": "npm test",
"postmerge": "/bin/true"
}
}

112
installer/src/installer.js Normal file
View File

@@ -0,0 +1,112 @@
/* jslint node: true */
'use strict';
var assert = require('assert'),
child_process = require('child_process'),
debug = require('debug')('installer:installer'),
path = require('path'),
safe = require('safetydance'),
semver = require('semver'),
superagent = require('superagent'),
util = require('util');
exports = module.exports = {
InstallerError: InstallerError,
provision: provision,
_ensureVersion: ensureVersion
};
var INSTALLER_CMD = path.join(__dirname, 'scripts/installer.sh'),
SUDO = '/usr/bin/sudo';
function InstallerError(reason, info) {
Error.call(this);
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.reason = reason;
this.message = !info ? reason : (typeof info === 'object' ? JSON.stringify(info) : info);
}
util.inherits(InstallerError, Error);
InstallerError.INTERNAL_ERROR = 1;
InstallerError.ALREADY_PROVISIONED = 2;
// systemd unit file has KillMode=control-group to bring down child processes
function spawn(tag, cmd, args, callback) {
assert.strictEqual(typeof tag, 'string');
assert.strictEqual(typeof cmd, 'string');
assert(util.isArray(args));
assert.strictEqual(typeof callback, 'function');
var cp = child_process.spawn(cmd, args, { timeout: 0 });
cp.stdout.setEncoding('utf8');
cp.stdout.on('data', function (data) { debug('%s (stdout): %s', tag, data); });
cp.stderr.setEncoding('utf8');
cp.stderr.on('data', function (data) { debug('%s (stderr): %s', tag, data); });
cp.on('error', function (error) {
debug('%s : child process errored %s', tag, error.message);
callback(error);
});
cp.on('exit', function (code, signal) {
debug('%s : child process exited. code: %d signal: %s', tag, code, signal);
if (signal) return callback(new Error('Exited with signal ' + signal));
if (code !== 0) return callback(new Error('Exited with code ' + code));
callback(null);
});
}
function ensureVersion(args, callback) {
assert.strictEqual(typeof args, 'object');
assert.strictEqual(typeof callback, 'function');
if (!args.data || !args.data.boxVersionsUrl) return callback(new Error('No boxVersionsUrl specified'));
if (args.sourceTarballUrl) return callback(null, args);
superagent.get(args.data.boxVersionsUrl).end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode !== 200) return callback(new Error(util.format('Bad status: %s %s', result.statusCode, result.text)));
var versions = safe.JSON.parse(result.text);
if (!versions || typeof versions !== 'object') return callback(new Error('versions is not in valid format:' + safe.error));
var latestVersion = Object.keys(versions).sort(semver.compare).pop();
debug('ensureVersion: Latest version is %s etag:%s', latestVersion, result.header['etag']);
if (!versions[latestVersion]) return callback(new Error('No version available'));
if (!versions[latestVersion].sourceTarballUrl) return callback(new Error('No sourceTarballUrl specified'));
args.sourceTarballUrl = versions[latestVersion].sourceTarballUrl;
args.data.version = latestVersion;
callback(null, args);
});
}
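To make the version selection above concrete, here is a minimal sketch (version numbers and URLs hypothetical) showing why semver.compare is used instead of a plain lexical sort:

var semver = require('semver');

var versions = {
    '0.9.1': { sourceTarballUrl: 'https://example.com/box-0.9.1.tar.gz' },
    '0.9.2': { sourceTarballUrl: 'https://example.com/box-0.9.2.tar.gz' },
    '0.10.0': { sourceTarballUrl: 'https://example.com/box-0.10.0.tar.gz' }
};

var latestVersion = Object.keys(versions).sort(semver.compare).pop();
console.log(latestVersion); // '0.10.0' - a lexical sort would wrongly pick '0.9.2'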
function provision(args, callback) {
assert.strictEqual(typeof args, 'object');
assert.strictEqual(typeof callback, 'function');
if (process.env.NODE_ENV === 'test') return callback(null);
ensureVersion(args, function (error, result) {
if (error) return callback(error);
var pargs = [ INSTALLER_CMD ];
pargs.push('--sourcetarballurl', result.sourceTarballUrl);
pargs.push('--data', JSON.stringify(result.data));
debug('provision: calling with args %j', pargs);
// sudo is required for update()
spawn('provision', SUDO, pargs, callback);
});
}
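For reference, a minimal sketch of a provision() caller (field values hypothetical; the shape mirrors the checks in ensureVersion above and the user data handling in server.js below):

var installer = require('./installer.js');

installer.provision({
    // sourceTarballUrl can be omitted; ensureVersion() then resolves the latest release from boxVersionsUrl
    data: {
        boxVersionsUrl: 'https://example.com/versions.json',
        apiServerOrigin: 'https://api.example.com',
        fqdn: 'my.example.com'
    }
}, function (error) {
    if (error) console.error('provision failed', error);
});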

View File

@@ -0,0 +1,67 @@
#!/bin/bash
set -eu -o pipefail
readonly BOX_SRC_DIR=/home/yellowtent/box
readonly DATA_DIR=/home/yellowtent/data
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly json="${script_dir}/../../node_modules/.bin/json"
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 300"
readonly is_update=$([[ -d "${BOX_SRC_DIR}" ]] && echo "yes" || echo "no")
# create a provision file for testing. %q escapes each argument; the format string is reapplied until all of $@ is consumed (illustrated after this script)
(echo -e "#!/bin/bash\n"; printf "%q " "${script_dir}/installer.sh" "$@") > /home/yellowtent/provision.sh
chmod +x /home/yellowtent/provision.sh
arg_source_tarball_url=""
arg_data=""
args=$(getopt -o "" -l "sourcetarballurl:,data:" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
case "$1" in
--sourcetarballurl) arg_source_tarball_url="$2";;
--data) arg_data="$2";;
--) break;;
*) echo "Unknown option $1"; exit 1;;
esac
shift 2
done
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
echo "Downloading box code from ${arg_source_tarball_url} to ${box_src_tmp_dir}"
while true; do
if $curl -L "${arg_source_tarball_url}" | tar -zxf - -C "${box_src_tmp_dir}"; then break; fi
echo "Failed to download source tarball, trying again"
sleep 5
done
while true; do
# for reasons unknown, the dtrace package fails on the first rebuild; a second attempt succeeds
if cd "${box_src_tmp_dir}" && npm rebuild; then break; fi
echo "Failed to rebuild, trying again"
sleep 5
done
if [[ "${is_update}" == "yes" ]]; then
echo "Setting up update splash screen"
"${box_src_tmp_dir}/setup/splashpage.sh" --data "${arg_data}" # show splash from new code
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
fi
# switch over to the new code
rm -rf "${BOX_SRC_DIR}"
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
chown -R yellowtent.yellowtent "${BOX_SRC_DIR}"
# create a start file for testing. %q escapes args
(echo -e "#!/bin/bash\n"; printf "%q " "${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}") > /home/yellowtent/setup_start.sh
chmod +x /home/yellowtent/setup_start.sh
echo "Calling box setup script"
"${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}"

144
installer/src/server.js Executable file
View File

@@ -0,0 +1,144 @@
#!/usr/bin/env node
/* jslint node: true */
'use strict';
var assert = require('assert'),
async = require('async'),
debug = require('debug')('installer:server'),
express = require('express'),
fs = require('fs'),
http = require('http'),
HttpError = require('connect-lastmile').HttpError,
HttpSuccess = require('connect-lastmile').HttpSuccess,
installer = require('./installer.js'),
json = require('body-parser').json,
lastMile = require('connect-lastmile'),
morgan = require('morgan'),
superagent = require('superagent');
exports = module.exports = {
start: start,
stop: stop
};
var PROVISION_CONFIG_FILE = '/root/provision.json';
var CLOUDRON_CONFIG_FILE = '/home/yellowtent/configs/cloudron.conf';
var gHttpServer = null; // http server serving the internal update endpoint
function provisionDigitalOcean(callback) {
if (fs.existsSync(CLOUDRON_CONFIG_FILE)) return callback(null); // already provisioned
superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) {
if (error || result.statusCode !== 200) {
console.error('Error getting metadata', error);
return callback(new Error('Error getting metadata'));
}
var userData = JSON.parse(result.body.user_data);
installer.provision(userData, callback);
});
}
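A sketch of the metadata reply this function expects (values hypothetical; note that the v1.json endpoint delivers user_data as a string, which is why it is JSON.parse'd separately):

var metadata = {
    user_data: JSON.stringify({
        data: { boxVersionsUrl: 'https://example.com/versions.json' } // hypothetical provisioning payload handed to installer.provision()
    })
};

var userData = JSON.parse(metadata.user_data);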
function provisionLocal(callback) {
if (fs.existsSync(CLOUDRON_CONFIG_FILE)) return callback(null); // already provisioned
if (!fs.existsSync(PROVISION_CONFIG_FILE)) {
console.error('No provisioning data found at %s', PROVISION_CONFIG_FILE);
return callback(new Error('No provisioning data found'));
}
var userData = require(PROVISION_CONFIG_FILE);
installer.provision(userData, callback);
}
function update(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
if (!req.body.sourceTarballUrl || typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided'));
if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided'));
debug('provision: received from box %j', req.body);
installer.provision(req.body, function (error) {
if (error) console.error(error);
});
next(new HttpSuccess(202, { }));
}
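A minimal sketch of exercising this endpoint (mirroring the tests further below; payload values hypothetical):

var superagent = require('superagent');

superagent.post('http://127.0.0.1:2020/api/v1/installer/update')
    .send({ sourceTarballUrl: 'https://example.com/box.tar.gz', data: { boxVersionsUrl: 'https://example.com/versions.json' } })
    .end(function (error, result) {
        if (error) return console.error(error);
        console.log(result.statusCode); // 202 - provisioning itself continues in the background
    });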
function startUpdateServer(callback) {
assert.strictEqual(typeof callback, 'function');
debug('Starting update server');
var app = express();
var router = new express.Router();
if (process.env.NODE_ENV !== 'test') app.use(morgan('dev', { immediate: false }));
app.use(json({ strict: true }))
.use(router)
.use(lastMile());
router.post('/api/v1/installer/update', update);
gHttpServer = http.createServer(app);
gHttpServer.on('error', console.error);
gHttpServer.listen(2020, '127.0.0.1', callback);
}
function stopUpdateServer(callback) {
assert.strictEqual(typeof callback, 'function');
debug('Stopping update server');
if (!gHttpServer) return callback(null);
gHttpServer.close(callback);
gHttpServer = null;
}
function start(callback) {
assert.strictEqual(typeof callback, 'function');
var actions;
if (process.env.PROVISION === 'local') {
debug('Starting Installer in selfhost mode');
actions = [
startUpdateServer,
provisionLocal
];
} else { // current fallback, should be 'digitalocean' eventually, see initializeBaseUbuntuImage.sh
debug('Starting Installer in managed mode');
actions = [
startUpdateServer,
provisionDigitalOcean
];
}
async.series(actions, callback);
}
function stop(callback) {
assert.strictEqual(typeof callback, 'function');
async.series([
stopUpdateServer
], callback);
}
if (require.main === module) {
start(function (error) {
if (error) console.error(error);
});
}

View File

@@ -0,0 +1,179 @@
/* jslint node:true */
/* global it:false */
/* global describe:false */
/* global before:false */
/* global after:false */
'use strict';
var expect = require('expect.js'),
fs = require('fs'),
path = require('path'),
nock = require('nock'),
os = require('os'),
request = require('superagent'),
server = require('../server.js'),
installer = require('../installer.js'),
_ = require('lodash');
var EXTERNAL_SERVER_URL = 'https://localhost:4443';
var INTERNAL_SERVER_URL = 'http://localhost:2020';
var APPSERVER_ORIGIN = 'http://appserver';
var FQDN = os.hostname();
describe('Server', function () {
this.timeout(5000);
before(function (done) {
var user_data = JSON.stringify({ apiServerOrigin: APPSERVER_ORIGIN }); // user_data is a string
var scope = nock('http://169.254.169.254')
.persist()
.get('/metadata/v1.json')
.reply(200, JSON.stringify({ user_data: user_data }), { 'Content-Type': 'application/json' });
done();
});
after(function (done) {
nock.cleanAll();
done();
});
describe('start and stop', function () {
it('starts', function (done) {
server.start(done);
});
it('stops', function (done) {
server.stop(done);
});
});
describe('update (internal server)', function () {
before(function (done) {
server.start(done);
});
after(function (done) {
server.stop(done);
});
it('does not respond to provision', function (done) {
request.post(INTERNAL_SERVER_URL + '/api/v1/installer/provision').send({ }).end(function (error, result) {
expect(error).to.not.be.ok();
expect(result.statusCode).to.equal(404);
done();
});
});
it('does not respond to restore', function (done) {
request.post(INTERNAL_SERVER_URL + '/api/v1/installer/restore').send({ }).end(function (error, result) {
expect(error).to.not.be.ok();
expect(result.statusCode).to.equal(404);
done();
});
});
var data = {
sourceTarballUrl: "https://foo.tar.gz",
data: {
token: 'sometoken',
apiServerOrigin: APPSERVER_ORIGIN,
webServerOrigin: 'https://somethingelse.com',
fqdn: 'www.something.com',
tlsKey: 'key',
tlsCert: 'cert',
boxVersionsUrl: 'https://versions.json',
version: '0.1'
}
};
Object.keys(data).forEach(function (key) {
it('fails due to missing ' + key, function (done) {
var dataCopy = _.merge({ }, data);
delete dataCopy[key];
request.post(INTERNAL_SERVER_URL + '/api/v1/installer/update').send(dataCopy).end(function (error, result) {
expect(error).to.not.be.ok();
expect(result.statusCode).to.equal(400);
done();
});
});
});
it('succeeds', function (done) {
request.post(INTERNAL_SERVER_URL + '/api/v1/installer/update').send(data).end(function (error, result) {
expect(error).to.not.be.ok();
expect(result.statusCode).to.equal(202);
done();
});
});
});
describe('ensureVersion', function () {
before(function () {
process.env.NODE_ENV = undefined; // env values coerce to the string 'undefined', which defeats the === 'test' check
});
after(function () {
process.env.NODE_ENV = 'test';
});
it ('fails without data', function (done) {
installer._ensureVersion({}, function (error) {
expect(error).to.be.an(Error);
done();
});
});
it ('fails without boxVersionsUrl', function (done) {
installer._ensureVersion({ data: {}}, function (error) {
expect(error).to.be.an(Error);
done();
});
});
it ('succeeds with sourceTarballUrl', function (done) {
var data = {
sourceTarballUrl: 'sometarballurl',
data: {
boxVersionsUrl: 'http://foobar/versions.json'
}
};
installer._ensureVersion(data, function (error, result) {
expect(error).to.equal(null);
expect(result).to.eql(data);
done();
});
});
it ('succeeds without sourceTarballUrl', function (done) {
var versions = {
'0.1.0': {
sourceTarballUrl: 'sometarballurl1'
},
'0.2.0': {
sourceTarballUrl: 'sometarballurl2'
}
};
var scope = nock('http://foobar')
.get('/versions.json')
.reply(200, JSON.stringify(versions), { 'Content-Type': 'application/json' });
var data = {
data: {
boxVersionsUrl: 'http://foobar/versions.json'
}
};
installer._ensureVersion(data, function (error, result) {
expect(error).to.equal(null);
expect(result.sourceTarballUrl).to.equal(versions['0.2.0'].sourceTarballUrl);
expect(result.data.boxVersionsUrl).to.equal(data.data.boxVersionsUrl);
done();
});
});
});
});

65
installer/systemd/box-setup.sh Executable file
View File

@@ -0,0 +1,65 @@
#!/bin/bash
set -eu -o pipefail
readonly USER_HOME="/home/yellowtent"
readonly APPS_SWAP_FILE="/apps.swap"
readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups
readonly USER_DATA_FILE="/root/user_data.img"
readonly USER_DATA_DIR="/home/yellowtent/data"
# detect device
if [[ -b "/dev/vda1" ]]; then
disk_device="/dev/vda1"
fi
if [[ -b "/dev/xvda1" ]]; then
disk_device="/dev/xvda1"
fi
# all sizes are in mb
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
readonly swap_size="${physical_memory}" # if you change this, fix enoughResourcesAvailable() in client.js
readonly app_count=$((${physical_memory} / 200)) # estimated app count
readonly disk_size_gb=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ print $3 }')
readonly disk_size=$((disk_size_gb * 1024))
readonly backup_swap_size=1024
# readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changed using tune2fs -m <percent> /dev/vda1
echo "Disk device: ${disk_device}"
echo "Physical memory: ${physical_memory}"
echo "Estimated app count: ${app_count}"
echo "Disk size: ${disk_size}"
# Allocate two swap files - one for general app usage and another for backups
# The backup swap is enabled on the fly by the backup scripts
if [[ ! -f "${APPS_SWAP_FILE}" ]]; then
echo "Creating Apps swap file of size ${swap_size}M"
fallocate -l "${swap_size}m" "${APPS_SWAP_FILE}"
chmod 600 "${APPS_SWAP_FILE}"
mkswap "${APPS_SWAP_FILE}"
swapon "${APPS_SWAP_FILE}"
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
else
echo "Apps Swap file already exists"
fi
if [[ ! -f "${BACKUP_SWAP_FILE}" ]]; then
echo "Creating Backup swap file of size ${backup_swap_size}M"
fallocate -l "${backup_swap_size}m" "${BACKUP_SWAP_FILE}"
chmod 600 "${BACKUP_SWAP_FILE}"
mkswap "${BACKUP_SWAP_FILE}"
else
echo "Backups Swap file already exists"
fi
echo "Resizing data volume"
home_data_size=$((disk_size - system_size - swap_size - backup_swap_size - ext4_reserved))
echo "Resizing up btrfs user data to size ${home_data_size}M"
umount "${USER_DATA_DIR}"
fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data
mount "${USER_DATA_FILE}"
btrfs filesystem resize max "${USER_DATA_DIR}"
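As a worked example of the sizing math above (numbers hypothetical: a 4 GB droplet with a 60 GB disk):

physical_memory = 4096M, so swap_size = 4096M and app_count = 20
disk_size = 60 * 1024 = 61440M
ext4_reserved = 61440 * 5 / 100 = 3072M
home_data_size = 61440 - 10240 (system) - 4096 (swap) - 1024 (backup swap) - 3072 (reserved) = 43008M, roughly 42 GB for user data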

BIN
logo.png

Binary file not shown (before: 3.4 KiB).

View File

@@ -1,5 +1,5 @@
'use strict';
var dbm = require('db-migrate');
var type = dbm.dataType;
var url = require('url');
exports.up = function(db, callback) {

View File

@@ -1,4 +1,5 @@
'use strict';
var dbm = require('db-migrate');
var type = dbm.dataType;
var fs = require('fs'),
async = require('async'),

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN resetToken VARCHAR(128) DEFAULT ""', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('DELETE FROM tokens', [], function (error) {

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE authcodes ADD COLUMN expiresAt BIGINT NOT NULL', function (error) {
@@ -12,4 +13,4 @@ exports.down = function(db, callback) {
if (error) console.error(error);
callback(error);
});
};
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE appPortBindings ADD COLUMN environmentVariable VARCHAR(128) NOT NULL', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE appPortBindings DROP COLUMN containerPort', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('DELETE FROM tokens', [], function (error) {

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN version', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN healthy, ADD COLUMN health VARCHAR(128)', [], function (error) {

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN lastBackupId VARCHAR(128)', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN createdAt TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
// everyday at 1am
@@ -7,4 +8,5 @@ exports.up = function(db, callback) {
exports.down = function(db, callback) {
db.runSql('DELETE FROM settings WHERE name="autoupdate_pattern"', [ ], callback);
};
}

View File

@@ -1,6 +1,6 @@
'use strict';
dbm = dbm || require('db-migrate');
var safe = require('safetydance');
var type = dbm.dataType;
exports.up = function(db, callback) {
var tz = safe.fs.readFileSync('/etc/timezone', 'utf8');
@@ -12,3 +12,4 @@ exports.up = function(db, callback) {
exports.down = function(db, callback) {
db.runSql('DELETE FROM settings WHERE name="time_zone"', [ ], callback);
};

View File

@@ -1,5 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
var async = require('async');
exports.up = function(db, callback) {

View File

@@ -1,5 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
var async = require('async');
exports.up = function(db, callback) {

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN lastManifestJson VARCHAR(2048)', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE lastManifestJson lastBackupConfigJson VARCHAR(2048)', [], function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN oldConfigJson VARCHAR(2048)', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('DELETE FROM settings', [ ], callback);

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN oauthProxy BOOLEAN DEFAULT 0', function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,5 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
var async = require('async');
exports.up = function(db, callback) {

View File

@@ -1,4 +1,5 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE accessRestriction accessRestrictionJson VARCHAR(2048)', [], function (error) {
@@ -13,3 +14,4 @@ exports.down = function(db, callback) {
callback(error);
});
};

View File

@@ -1,4 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps MODIFY manifestJson TEXT', [], function (error) {

View File

@@ -1,5 +1,5 @@
'use strict';
dbm = dbm || require('db-migrate');
var type = dbm.dataType;
var async = require('async');
exports.up = function(db, callback) {

View File

@@ -1,4 +1,4 @@
'use strict';
dbm = dbm || require('db-migrate');
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN displayName VARCHAR(512) DEFAULT ""', function (error) {

View File

@@ -1,4 +1,4 @@
'use strict';
dbm = dbm || require('db-migrate');
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN memoryLimit BIGINT DEFAULT 0', function (error) {

View File

@@ -1,7 +1,8 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
var cmd = "CREATE TABLE userGroups(" +
var cmd = "CREATE TABLE groups(" +
"id VARCHAR(128) NOT NULL UNIQUE," +
"name VARCHAR(128) NOT NULL UNIQUE," +
"PRIMARY KEY(id))";
@@ -13,7 +14,7 @@ exports.up = function(db, callback) {
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE userGroups', function (error) {
db.runSql('DROP TABLE groups', function (error) {
if (error) console.error(error);
callback(error);
});

View File

@@ -1,10 +1,11 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
var cmd = "CREATE TABLE IF NOT EXISTS groupMembers(" +
"groupId VARCHAR(128) NOT NULL," +
"userId VARCHAR(128) NOT NULL," +
"FOREIGN KEY(groupId) REFERENCES userGroups(id)," +
"FOREIGN KEY(groupId) REFERENCES groups(id)," +
"FOREIGN KEY(userId) REFERENCES users(id));";
db.runSql(cmd, function (error) {

View File

@@ -1,17 +1,20 @@
'use strict';
var dbm = global.dbm || require('db-migrate');
var async = require('async');
var ADMIN_GROUP_ID = 'admin'; // see constants.js
var ADMIN_GROUP_ID = 'admin'; // see groups.js
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'START TRANSACTION;'),
db.runSql.bind(db, 'INSERT INTO userGroups (id, name) VALUES (?, ?)', [ ADMIN_GROUP_ID, 'admin' ]),
db.runSql.bind(db, 'INSERT INTO groups (id, name) VALUES (?, ?)', [ ADMIN_GROUP_ID, 'admin' ]),
function migrateAdminFlag(done) {
db.all('SELECT * FROM users WHERE admin=1', function (error, results) {
if (error) return done(error);
console.dir(results);
async.eachSeries(results, function (r, next) {
db.runSql('INSERT INTO groupMembers (groupId, userId) VALUES (?, ?)', [ ADMIN_GROUP_ID, r.id ], next);
}, done);

View File

@@ -1,24 +0,0 @@
'use strict';
exports.up = function(db, callback) {
var cmd = "CREATE TABLE backups(" +
"filename VARCHAR(128) NOT NULL," +
"creationTime TIMESTAMP," +
"version VARCHAR(128) NOT NULL," +
"type VARCHAR(16) NOT NULL," +
"dependsOn VARCHAR(4096)," +
"state VARCHAR(16) NOT NULL," +
"PRIMARY KEY (filename))";
db.runSql(cmd, function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE backups', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups ADD COLUMN configJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN configJson', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN configJson', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups ADD COLUMN configJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups CHANGE filename id VARCHAR(128)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups CHANGE id filename VARCHAR(128)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users MODIFY username VARCHAR(254) UNIQUE', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users MODIFY username VARCHAR(254) NOT NULL UNIQUE', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN altDomain VARCHAR(256)', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN altDomain', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,23 +0,0 @@
'use strict';
exports.up = function(db, callback) {
var cmd = "CREATE TABLE eventlog(" +
"id VARCHAR(128) NOT NULL," +
"source TEXT," +
"creationTime TIMESTAMP," +
"action VARCHAR(128) NOT NULL," +
"data TEXT," +
"PRIMARY KEY (id))";
db.runSql(cmd, function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE eventlog', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users DROP COLUMN showTutorial', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,21 +0,0 @@
'use strict';
exports.up = function(db, callback) {
var cmd = 'CREATE TABLE mailboxes(' +
'name VARCHAR(128) NOT NULL,' +
'aliasTarget VARCHAR(128),' +
'creationTime TIMESTAMP,' +
'PRIMARY KEY (name))';
db.runSql(cmd, function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE mailboxes', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,26 +0,0 @@
'use strict';
var async = require('async');
// imports mailbox entries for existing users
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'START TRANSACTION;'),
function addUserMailboxes(done) {
db.all('SELECT username FROM users', function (error, results) {
if (error) return done(error);
async.eachSeries(results, function (r, next) {
if (!r.username) return next();
db.runSql('INSERT INTO mailboxes (name) VALUES (?)', [ r.username ], next);
}, done);
});
},
db.runSql.bind(db, 'COMMIT')
], callback);
};
exports.down = function(db, callback) {
callback();
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN lastBackupConfigJson', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN lastBackupConfigJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps MODIFY installationProgress TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps MODIFY installationProgress VARCHAR(512)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN xFrameOptions VARCHAR(512)', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN xFrameOptions', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT id FROM users', function (error, results) {
if (error) return callback(error);
// existing cloudrons have email enabled by default. future cloudrons will have it disabled by default
var enable = results.length !== 0;
db.runSql('INSERT settings (name, value) VALUES("mail_config", ?)', [ JSON.stringify({ enabled: enable }) ], callback);
});
};
exports.down = function(db, callback) {
db.runSql('DELETE FROM settings WHERE name="mail_config"', [ ], callback);
};

View File

@@ -1,73 +0,0 @@
'use strict';
var async = require('async');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN ownerId VARCHAR(128)'),
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN ownerType VARCHAR(16)'),
db.runSql.bind(db, 'START TRANSACTION;'),
function addGroupMailboxes(done) {
console.log('Importing group mailboxes');
db.all('SELECT id, name FROM userGroups', function (error, results) {
if (error) return done(error);
async.eachSeries(results, function (g, next) {
db.runSql('INSERT INTO mailboxes (ownerId, ownerType, name) VALUES (?, ?, ?)', [ g.id, 'group', g.name ], function (error) {
if (error) console.error('Error importing group ' + JSON.stringify(g) + error);
next();
});
}, done);
});
},
function addAppMailboxes(done) {
console.log('Importing app mailboxes');
db.all('SELECT id, location, manifestJson FROM apps', function (error, results) {
if (error) return done(error);
async.eachSeries(results, function (a, next) {
var manifest = JSON.parse(a.manifestJson);
if (!manifest.addons['sendmail'] && !manifest.addons['recvmail']) return next();
var mailboxName = (a.location ? a.location : manifest.title.replace(/[^a-zA-Z0-9]/g, '')) + '.app';
db.runSql('INSERT INTO mailboxes (ownerId, ownerType, name) VALUES (?, ?, ?)', [ a.id, 'app', mailboxName ], function (error) {
if (error) console.error('Error importing app ' + JSON.stringify(a) + error);
next();
});
}, done);
});
},
function setUserMailboxOwnerIds(done) {
console.log('Setting owner id of user mailboxes and aliases');
db.all('SELECT id, username FROM users', function (error, results) {
if (error) return done(error);
async.eachSeries(results, function (u, next) {
if (!u.username) return next();
db.runSql('UPDATE mailboxes SET ownerId = ?, ownerType = ? WHERE name = ? OR aliasTarget = ?', [ u.id, 'user', u.username, u.username ], function (error) {
if (error) console.error('Error setting ownerid ' + JSON.stringify(u) + error);
next();
});
}, done);
});
},
db.runSql.bind(db, 'COMMIT'),
db.runSql.bind(db, 'ALTER TABLE mailboxes MODIFY ownerId VARCHAR(128) NOT NULL'),
db.runSql.bind(db, 'ALTER TABLE mailboxes MODIFY ownerType VARCHAR(128) NOT NULL'),
], callback);
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE mailboxes DROP COLUMN ownerId', function (error) {
if (error) console.error(error);
db.runSql('ALTER TABLE mailboxes DROP COLUMN ownerType', function (error) {
if (error) console.error(error);
callback(error);
});
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN sso BOOLEAN DEFAULT 1', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN sso', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN oauthProxy', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN oauthProxy BOOLEAN DEFAULT 0', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users DROP COLUMN showTutorial', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN debugModeJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN debugModeJson ', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups MODIFY dependsOn TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups MODIFY dependsOn VARCHAR(4096)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,16 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE appAddonConfigs ADD COLUMN name VARCHAR(128)', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE appAddonConfigs DROP COLUMN name', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,14 +0,0 @@
'use strict';
var url = require('url');
exports.up = function(db, callback) {
var dbName = url.parse(process.env.DATABASE_URL).path.substr(1); // remove slash
// by default, mysql collates case insensitively. 'utf8_general_cs' is not available
db.runSql('ALTER DATABASE ' + dbName + ' DEFAULT CHARACTER SET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci', callback);
};
exports.down = function(db, callback) {
callback();
};

View File

@@ -1,95 +0,0 @@
'use strict';
var async = require('async');
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
function postProcess(result) {
try {
result.manifest = JSON.parse(result.manifestJson);
delete result.manifestJson;
result.oldConfig = JSON.parse(result.oldConfigJson);
delete result.oldConfigJson;
result.portBindings = { };
var hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
var environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');
delete result.hostPorts;
delete result.environmentVariables;
for (var i = 0; i < environmentVariables.length; i++) {
result.portBindings[environmentVariables[i]] = parseInt(hostPorts[i], 10);
}
result.accessRestriction = JSON.parse(result.accessRestrictionJson);
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
delete result.accessRestrictionJson;
// TODO remove later once all apps have this attribute
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
result.sso = !!result.sso; // make it bool
result.debugMode = JSON.parse(result.debugModeJson);
delete result.debugModeJson;
} catch (e) {
console.error('Failed to get restoreConfig for app.', e);
console.error('Falling back to empty values to make the update succeed.');
result.manifest = null;
}
    }
}
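A worked example of the port-binding reassembly above (row values hypothetical, shaped like the GROUP_CONCAT query further below produces):

var row = { hostPorts: '8000,2525', environmentVariables: 'HTTP_PORT,SMTP_PORT' };

var hostPorts = row.hostPorts.split(','), environmentVariables = row.environmentVariables.split(',');
var portBindings = { };
for (var i = 0; i < environmentVariables.length; i++) {
    portBindings[environmentVariables[i]] = parseInt(hostPorts[i], 10);
}
// portBindings is now { HTTP_PORT: 8000, SMTP_PORT: 2525 }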
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
// fill all the backups with restoreConfigs from current apps
function addRestoreConfigs(callback) {
console.log('Importing restoreConfigs');
var appQuery = 'SELECT ' + APPS_FIELDS_PREFIXED + ',' +
'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables' +
' FROM apps LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId' +
' GROUP BY apps.id ORDER BY apps.id';
db.all(appQuery, function (error, apps) {
if (error) return callback(error);
apps.forEach(postProcess);
async.eachSeries(apps, function (app, next) {
if (app.manifest === null) return next();
db.all('SELECT * FROM backups WHERE type="app" AND id LIKE "%app%\\_' + app.id + '\\_%"', function (error, backups) {
if (error) return next(error);
// from apps.js:getAppConfig()
var restoreConfig = {
manifest: app.manifest,
location: app.location,
accessRestriction: app.accessRestriction,
portBindings: app.portBindings,
memoryLimit: app.memoryLimit,
xFrameOptions: app.xFrameOptions || 'SAMEORIGIN',
altDomain: app.altDomain
};
async.eachSeries(backups, function (backup, next) {
db.runSql('UPDATE backups SET restoreConfigJson=?,creationTime=creationTime WHERE id=?', [ JSON.stringify(restoreConfig), backup.id ], next);
}, next);
});
}, callback);
});
}
], callback);
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN restoreConfigJson', callback);
};

View File

@@ -1,22 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
if (error || results.length === 0) return callback(error);
var backupConfig = JSON.parse(results[0].value);
if (backupConfig.provider === 'filesystem') {
backupConfig.retentionSecs = 2 * 24 * 60 * 60; // 2 days
} else if (backupConfig.provider === 's3') { // S3
backupConfig.retentionSecs = -1;
} else if (backupConfig.provider === 'caas') {
backupConfig.retentionSecs = 10 * 24 * 60 * 60; // 10 days
}
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
});
};
exports.down = function(db, callback) {
callback();
};

View File

@@ -1,9 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('INSERT settings (name, value) VALUES("mail_relay", ?)', [ JSON.stringify({ provider: 'cloudron-smtp' }) ], callback);
};
exports.down = function(db, callback) {
db.runSql('DELETE FROM settings WHERE name="mail_relay"', [ ], callback);
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN robotsTxt TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN robotsTxt', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,29 +0,0 @@
'use strict';
// we used to have JSON as the db type for those two, however mariadb does not support it
// and we never used any JSON related features, but have the TEXT pattern everywhere
// This ensures all old cloudrons will have the columns altered
exports.up = function(db, callback) {
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
if (error) console.error(error);
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
if (error) console.error(error);
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
});
};

View File

@@ -1,16 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN enableBackup BOOLEAN DEFAULT 1', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN enableBackup', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE settings MODIFY value TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE settings MODIFY value VARCHAR(512)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,25 +0,0 @@
'use strict';
// ensure backupFolder and format are not empty
exports.up = function(db, callback) {
db.all('SELECT * FROM settings WHERE name=?', [ 'backup_config' ], function (error, result) {
if (error || result.length === 0) return callback(error);
var value = JSON.parse(result[0].value);
value.format = 'tgz'; // set the format
if (value.provider === 'filesystem' && !value.backupFolder) {
value.backupFolder = '/var/backups'; // set the backupFolder
}
db.runSql('UPDATE settings SET value = ? WHERE name = ?', [ JSON.stringify(value), 'backup_config' ], function (error) {
if (error) console.error('Error updating backup_config: ' + error);
callback();
});
});
};
exports.down = function(db, callback) {
callback();
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups ADD COLUMN format VARCHAR(16) DEFAULT "tgz"', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN format', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN newConfigJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN newConfigJson', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,40 +0,0 @@
'use strict';
var async = require('async');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN manifestJson TEXT'),
db.runSql.bind(db, 'START TRANSACTION;'),
// fill all the backups with restoreConfigs from current apps
function addManifests(callback) {
console.log('Importing manifests');
db.all('SELECT * FROM backups WHERE type="app"', function (error, backups) {
if (error) return callback(error);
async.eachSeries(backups, function (backup, next) {
var m = backup.restoreConfigJson ? JSON.parse(backup.restoreConfigJson) : null;
if (m) m = JSON.stringify(m.manifest);
db.runSql('UPDATE backups SET manifestJson=? WHERE id=?', [ m, backup.id ], next);
}, callback);
});
},
db.runSql.bind(db, 'COMMIT'),
// remove the restoreConfig
db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN restoreConfigJson')
], callback);
};
exports.down = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN manifestJson'),
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
], callback);
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE newConfigJson updateConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE updateConfigJson newConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE lastBackupId restoreConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE restoreConfigJson lastBackupId TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,31 +0,0 @@
'use strict';
// WARNING!!
// At this point the default db collation is utf8mb4_unicode_ci however we already have foreign key constraits
// already with tables on utf8_bin charset, so we cannot convert all tables here to utf8mb4 collation without
// a reimport from a sql dump, as foreign keys across different collations are not supported
var async = require('async');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE appPortBindings CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE apps CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE authcodes CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE backups CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE clients CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE eventlog CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE groupMembers CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE userGroups CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE mailboxes CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE migrations CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE settings CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE tokens CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
db.runSql.bind(db, 'ALTER TABLE users CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin')
], callback);
};
exports.down = function(db, callback) {
// nothing to be done here
callback();
};

View File

@@ -1,70 +0,0 @@
'use strict';
var async = require('async'),
safe = require('safetydance');
exports.up = function(db, callback) {
// first check precondition of domain entry in settings
db.all('SELECT * FROM settings WHERE name = ?', [ 'domain' ], function (error, result) {
if (error) return callback(error);
var domain = {};
if (result[0]) domain = safe.JSON.parse(result[0].value) || {};
async.series([
db.runSql.bind(db, 'START TRANSACTION;'),
function addAppsDomainColumn(done) {
db.runSql('ALTER TABLE apps ADD COLUMN domain VARCHAR(128)', [], done);
},
function setAppDomain(done) {
if (!domain.fqdn) return done(); // skip for new cloudrons without a domain
db.runSql('UPDATE apps SET domain = ?', [ domain.fqdn ], done);
},
function addAppsLocationDomainUniqueConstraint(done) {
db.runSql('ALTER TABLE apps ADD UNIQUE location_domain_unique_index (location, domain)', [], done);
},
function removePresetupAdminGroupIfNew(done) {
// do not delete on update, will update the record in setMailboxesDomain()
if (domain.fqdn) return done();
// this will be finally created once we have a domain when we create the owner in user.js
const ADMIN_GROUP_ID = 'admin'; // see constants.js
db.runSql('DELETE FROM userGroups WHERE id = ?', [ ADMIN_GROUP_ID ], function (error) {
if (error) return done(error);
db.runSql('DELETE FROM mailboxes WHERE ownerId = ?', [ ADMIN_GROUP_ID ], done);
});
},
function addMailboxesDomainColumn(done) {
db.runSql('ALTER TABLE mailboxes ADD COLUMN domain VARCHAR(128)', [], done);
},
function setMailboxesDomain(done) {
if (!domain.fqdn) return done(); // skip for new cloudrons without a domain
db.runSql('UPDATE mailboxes SET domain = ?', [ domain.fqdn ], done);
},
function dropAppsLocationUniqueConstraint(done) {
db.runSql('ALTER TABLE apps DROP INDEX location', [], done);
},
db.runSql.bind(db, 'COMMIT')
], callback);
});
};
exports.down = function(db, callback) {
async.series([
db.runSql.bind(db, 'START TRANSACTION;'),
function dropMailboxesDomainColumn(done) {
db.runSql('ALTER TABLE mailboxes DROP COLUMN domain', [], done);
},
function dropLocationDomainUniqueConstraint(done) {
db.runSql('ALTER TABLE apps DROP INDEX location_domain_unique_index', [], done);
},
function dropAppsDomainColumn(done) {
db.runSql('ALTER TABLE apps DROP COLUMN domain', [], done);
},
function addAppsLocationUniqueConstraint(done) {
db.runSql('ALTER TABLE apps ADD UNIQUE location (location)', [], done);
},
db.runSql.bind(db, 'COMMIT')
], callback);
};

View File

@@ -1,61 +0,0 @@
'use strict';
var async = require('async'),
safe = require('safetydance'),
tld = require('tldjs');
exports.up = function(db, callback) {
var fqdn, zoneName, configJson;
async.series([
function gatherDomain(done) {
db.all('SELECT * FROM settings WHERE name = ?', [ 'domain' ], function (error, result) {
if (error) return done(error);
var domain = {};
if (result[0]) domain = safe.JSON.parse(result[0].value) || {};
fqdn = domain.fqdn || ''; // will be null pre-setup
zoneName = domain.zoneName || tld.getDomain(fqdn) || fqdn;
done();
});
},
function gatherDNSConfig(done) {
db.all('SELECT * FROM settings WHERE name = ?', [ 'dns_config' ], function (error, result) {
if (error) return done(error);
configJson = (result[0] && result[0].value) ? result[0].value : JSON.stringify({ provider: 'manual'});
// caas dns config needs an fqdn
var config = JSON.parse(configJson);
if (config.provider === 'caas') config.fqdn = fqdn;
configJson = JSON.stringify(config);
done();
});
},
db.runSql.bind(db, 'START TRANSACTION;'),
function createDomainsTable(done) {
var cmd = `
CREATE TABLE domains(
domain VARCHAR(128) NOT NULL UNIQUE,
zoneName VARCHAR(128) NOT NULL,
configJson TEXT,
PRIMARY KEY (domain)) CHARACTER SET utf8 COLLATE utf8_bin
`;
db.runSql(cmd, [], done);
},
function addInitialDomain(done) {
if (!fqdn) return done();
db.runSql('INSERT INTO domains (domain, zoneName, configJson) VALUES (?, ?, ?)', [ fqdn, zoneName, configJson ], done);
},
db.runSql.bind(db, 'COMMIT')
], callback);
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE domains', callback);
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD CONSTRAINT apps_domain_constraint FOREIGN KEY(domain) REFERENCES domains(domain)', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP FOREIGN KEY apps_domain_constraint', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE mailboxes ADD CONSTRAINT mailboxes_domain_constraint FOREIGN KEY(domain) REFERENCES domains(domain)', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE mailboxes DROP FOREIGN KEY mailboxes_domain_constraint', function (error) {
if (error) console.error(error);
callback(error);
});
};

View File

@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE mailboxes DROP PRIMARY KEY', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE mailboxes ADD PRIMARY KEY(name)', function (error) {
if (error) console.error(error);
callback(error);
});
};

Some files were not shown because too many files have changed in this diff.