Compare commits
123 Commits
Commit SHA1s:

4aae663b2e  da00bce4b7  0067766284  bb0b5550e0  1db1f3faf4  9650a55c85
9451bcd38b  aa7dbdd1fa  ac18fb47b4  91a229305d  70b0da9e38  4275114d28
83872a0a1d  4d4aad084c  8994a12117  28b6a340f0  1724607433  39864fbbb9
94dcec9df1  10ca889de0  cfcc210f9c  38e5d2286e  149e176cfd  3a19ab6866
aa71a734b9  d81ee7d99a  2946657889  fc6f91157d  315d721174  ed7f2e7bb5
53cb9b1f7a  cccdf68cec  f04654022a  2b92310d24  c21155f07b  baded52c96
476f348693  dd58c174a8  376e070b72  f0e0372127  5e2c655ccb  4a158c559e
03a59cd500  b71ab187ff  bbed7c1d8a  c496d994c0  7a6a170451  5a6b261ba2
70fbcf8ce4  93712c0f03  e78abe2fab  e190076f1a  4a85207dba  b0e80de9ec
a546914796  3af6012779  5b51f73be4  d74537868a  2056ede942  f2d366c35d
0bb2da8a04  38607048ee  9c413ffe3d  14e1cb5ad6  aaf93cb772  8f08c52103
9ccd82ce4e  013669e872  9ebdeca3ad  8823487bc1  c4dffa393b  a5c4b5d8a1
2f58092af2  1f7877e0e5  a304c7f4a5  601fc9a202  32e00bdf47  83fa83a709
895ccdb549  fd8741be16  3206afcd7c  ab2d246945  41ec22e8c3  af54142997
c8c4f99849  48c52533c4  1a98d6d2bd  615198cd36  664b3ab958  dac677df06
fd2087d7e4  d5087ff0c2  1d0ad3cb47  30c3acaed9  afd938abdf  38ca8926af
283f1aac21  8ba1f3914c  a262b08887  925408ffcd  04d4375297  691b15363a
caadb1d418  382ae7424d  6073d2ba7e  6ecbd4a0fd  92c43e58c7  dc91abb800
e19ab45e81  72daaa9ff0  8106fa3b7d  282040ed1b  bcd04715c0  14b2fa55c3
04e103a32d  0b0c02e421  196a5cfb42  fc408b8288  e2c342f242  19fcabd32b
a842d77b6d  ef68cb70c0  adfb506af4
CHANGES (42 lines changed)
@@ -2186,3 +2186,45 @@
[6.1.1]
* Fix bug where platform does not start if memory limits could not be applied

[6.1.2]
* App disk usage was not shown in graphs
* Email autoconfig
* Fix SOGo login

[6.2.0]
* ovh: object storage URL has changed from s3 to storage subdomain
* ionos: add profit bricks object storage
* update node to 14.15.4
* update docker to 20.10.3
* new base image 3.0.0
* postgresql updated to 12.5
* redis updated to 5.0.7
* dovecot updated to 2.3.7
* proxyAuth: fix docker UA detection
* registry config: add UI to disable it
* update solr to 8.8.1
* firewall: fix issue where script errored when having more than 15 wl/bl ports
* If groups are used, do not allow app installation without choosing the access settings
* tls addon
* Do not overwrite existing DMARC record
* Sync dns records
* Dry run restore
* linode: show cloudron is installing when user SSHs
* mysql: disable bin logs
* Show cancel task button if task is still running after 2 minutes
* filemanager: fix various bugs involving file names with spaces
* Change Referrer-policy default to 'same-origin'
* rsync: preserve and restore symlinks
* Clean up backups function now removes missing backups

[6.2.1]
* Avoid updown notifications on full restore
* Add retries to downloader logic in installer

[6.2.2]
* Fix ENOBUFS issue with backups when collecting fs metadata

[6.2.3]
* Fix addon crashes with missing databases
* Update mail container for LMTP cert fix
* Fix services view showing yellow icon
@@ -1,193 +0,0 @@
#!/bin/bash

set -eu -o pipefail

assertNotEmpty() {
    : "${!1:? "$1 is not set."}"
}

readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
export JSON="${SOURCE_DIR}/node_modules/.bin/json"

INSTANCE_TYPE="t2.micro"
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
SSH_KEY_NAME="id_rsa_yellowtent"

revision=$(git rev-parse HEAD)
ami_name=""
server_id=""
server_ip=""
destroy_server="yes"
deploy_env="prod"
image_id=""

args=$(getopt -o "" -l "revision:,name:,no-destroy,env:,region:" -n "$0" -- "$@")
eval set -- "${args}"

while true; do
    case "$1" in
    --env) deploy_env="$2"; shift 2;;
    --revision) revision="$2"; shift 2;;
    --name) ami_name="$2"; shift 2;;
    --no-destroy) destroy_server="no"; shift 2;;
    --region)
        case "$2" in
        "us-east-1")
            image_id="ami-6edd3078"
            security_group="sg-a5e17fd9"
            subnet_id="subnet-b8fbc0f1"
            ;;
        "eu-central-1")
            image_id="ami-5aee2235"
            security_group="sg-19f5a770" # everything open on eu-central-1
            subnet_id=""
            ;;
        *)
            echo "Unknown aws region $2"
            exit 1
            ;;
        esac
        export AWS_DEFAULT_REGION="$2" # used by the aws cli tool
        shift 2
        ;;
    --) break;;
    *) echo "Unknown option $1"; exit 1;;
    esac
done

# TODO fix this
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"

readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"

if [[ ! -f "${ssh_keys}" ]]; then
    echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
    exit 1
fi

if [[ -z "${image_id}" ]]; then
    echo "--region is required (us-east-1 or eu-central-1)"
    exit 1
fi

function get_pretty_revision() {
    local git_rev="$1"
    local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)

    echo "${sha1}"
}

function wait_for_ssh() {
    echo "=> Waiting for ssh connection"
    while true; do
        echo -n "."

        if $SSH ubuntu@${server_ip} echo "hello"; then
            echo ""
            break
        fi

        sleep 5
    done
}

now=$(date "+%Y-%m-%d-%H%M%S")
pretty_revision=$(get_pretty_revision "${revision}")

if [[ -z "${ami_name}" ]]; then
    ami_name="box-${deploy_env}-${pretty_revision}-${now}"
fi

echo "=> Create EC2 instance"
id=$(aws ec2 run-instances --image-id "${image_id}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${security_group}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}" --subnet-id "${subnet_id}" --associate-public-ip-address \
    | $JSON Instances \
    | $JSON 0.InstanceId)

[[ -z "$id" ]] && exit 1
echo "Instance created ID $id"

echo "=> Waiting for instance to get a public IP"
while true; do
    server_ip=$(aws ec2 describe-instances --instance-ids ${id} \
        | $JSON Reservations.0.Instances \
        | $JSON 0.PublicIpAddress)

    if [[ ! -z "${server_ip}" ]]; then
        echo ""
        break
    fi

    echo -n "."
    sleep 1
done

echo "Got public IP ${server_ip}"

wait_for_ssh

echo "=> Fetching cloudron-setup"
while true; do

    if $SSH ubuntu@${server_ip} wget "https://cloudron.io/cloudron-setup" -O "cloudron-setup"; then
        echo ""
        break
    fi

    echo -n "."
    sleep 5
done

echo "=> Running cloudron-setup"
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ami" --skip-reboot

wait_for_ssh

echo "=> Removing ssh key"
$SSH ubuntu@${server_ip} sudo rm /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys

echo "=> Creating AMI"
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
[[ -z "$id" ]] && exit 1
echo "Creating AMI with Id ${image_id}"

echo "=> Waiting for AMI to be created"
while true; do
    state=$(aws ec2 describe-images --image-ids ${image_id} \
        | $JSON Images \
        | $JSON 0.State)

    if [[ "${state}" == "available" ]]; then
        echo ""
        break
    fi

    echo -n "."
    sleep 5
done

if [[ "${destroy_server}" == "yes" ]]; then
    echo "=> Deleting EC2 instance"

    while true; do
        state=$(aws ec2 terminate-instances --instance-id "${id}" \
            | $JSON TerminatingInstances \
            | $JSON 0.CurrentState.Name)

        if [[ "${state}" == "shutting-down" ]]; then
            echo ""
            break
        fi

        echo -n "."
        sleep 5
    done
fi

echo ""
echo "Done."
echo ""
echo "New AMI is: ${image_id}"
echo ""
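For context, the getopt block in the deleted script above implies an invocation along these lines. This is a hypothetical sketch: the script path and credential values are assumptions, not taken from the diff.

    # the script reads AWS_ACCESS_KEY / AWS_ACCESS_SECRET (see the "TODO fix this" above)
    export AWS_ACCESS_KEY="AKIA..."       # placeholder
    export AWS_ACCESS_SECRET="..."        # placeholder

    # build a prod AMI in us-east-1 from HEAD, keeping the build instance for inspection
    ./createami.sh --region us-east-1 --env prod --no-destroy   # hypothetical script name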
@@ -1,261 +0,0 @@
#!/bin/bash

if [[ -z "${DIGITAL_OCEAN_TOKEN}" ]]; then
    echo "Script requires DIGITAL_OCEAN_TOKEN env to be set"
    exit 1
fi

if [[ -z "${JSON}" ]]; then
    echo "Script requires JSON env to be set to path of JSON binary"
    exit 1
fi

readonly CURL="curl --retry 5 -s -u ${DIGITAL_OCEAN_TOKEN}:"

function debug() {
    echo "$@" >&2
}

function get_ssh_key_id() {
    id=$($CURL "https://api.digitalocean.com/v2/account/keys" \
        | $JSON ssh_keys \
        | $JSON -c "this.name === \"$1\"" \
        | $JSON 0.id)
    [[ -z "$id" ]] && exit 1
    echo "$id"
}

function create_droplet() {
    local ssh_key_id="$1"
    local box_name="$2"

    local image_region="sfo2"
    local ubuntu_image_slug="ubuntu-16-04-x64"
    local box_size="1gb"

    local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"

    id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id)
    [[ -z "$id" ]] && exit 1
    echo "$id"
}

function get_droplet_ip() {
    local droplet_id="$1"
    ip=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address")
    [[ -z "$ip" ]] && exit 1
    echo "$ip"
}

function get_droplet_id() {
    local droplet_name="$1"
    id=$($CURL "https://api.digitalocean.com/v2/droplets?per_page=200" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id")
    [[ -z "$id" ]] && exit 1
    echo "$id"
}

function power_off_droplet() {
    local droplet_id="$1"
    local data='{"type":"power_off"}'
    local response=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions")
    local event_id=`echo "${response}" | $JSON action.id`

    if [[ -z "${event_id}" ]]; then
        debug "Got no event id, assuming already powered off."
        debug "Response: ${response}"
        return
    fi

    debug "Powered off droplet. Event id: ${event_id}"
    debug -n "Waiting for droplet to power off"

    while true; do
        local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
        if [[ "${event_status}" == "completed" ]]; then
            break
        fi
        debug -n "."
        sleep 10
    done
    debug ""
}

function power_on_droplet() {
    local droplet_id="$1"
    local data='{"type":"power_on"}'
    local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`

    debug "Powered on droplet. Event id: ${event_id}"

    if [[ -z "${event_id}" ]]; then
        debug "Got no event id, assuming already powered on"
        return
    fi

    debug -n "Waiting for droplet to power on"

    while true; do
        local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
        if [[ "${event_status}" == "completed" ]]; then
            break
        fi
        debug -n "."
        sleep 10
    done
    debug ""
}

function get_image_id() {
    local snapshot_name="$1"
    local image_id=""

    if ! response=$($CURL "https://api.digitalocean.com/v2/images?per_page=200"); then
        echo "Failed to get image listing. ${response}"
        return 1
    fi

    if ! image_id=$(echo "$response" \
        | $JSON images \
        | $JSON -c "this.name === \"${snapshot_name}\"" 0.id); then
        echo "Failed to parse curl response: ${response}"
        return 1
    fi

    if [[ -z "${image_id}" ]]; then
        echo "Failed to get image id of ${snapshot_name}. reponse: ${response}"
        return 1
    fi

    echo "${image_id}"
}

function snapshot_droplet() {
    local droplet_id="$1"
    local snapshot_name="$2"
    local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}"
    local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`

    debug "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}"
    debug -n "Waiting for snapshot to complete"

    while true; do
        if ! response=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}"); then
            echo "Could not get action status. ${response}"
            continue
        fi
        if ! event_status=$(echo "${response}" | $JSON action.status); then
            echo "Could not parse action.status from response. ${response}"
            continue
        fi
        if [[ "${event_status}" == "completed" ]]; then
            break
        fi
        debug -n "."
        sleep 10
    done
    debug "! done"

    if ! image_id=$(get_image_id "${snapshot_name}"); then
        return 1
    fi
    echo "${image_id}"
}

function destroy_droplet() {
    local droplet_id="$1"
    # TODO: check for 204 status
    $CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}"
    debug "Droplet destroyed"
    debug ""
}

function transfer_image() {
    local image_id="$1"
    local region_slug="$2"
    local data="{\"type\":\"transfer\",\"region\":\"${region_slug}\"}"
    local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/images/${image_id}/actions" | $JSON action.id`
    echo "${event_id}"
}

function wait_for_image_event() {
    local image_id="$1"
    local event_id="$2"

    debug -n "Waiting for ${event_id}"

    while true; do
        local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status`
        if [[ "${event_status}" == "completed" ]]; then
            break
        fi
        debug -n "."
        sleep 10
    done
    debug ""
}

function transfer_image_to_all_regions() {
    local image_id="$1"

    xfer_events=()
    image_regions=(ams2) ## sfo1 is where the image is created
    for image_region in ${image_regions[@]}; do
        xfer_event=$(transfer_image ${image_id} ${image_region})
        echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}"
        xfer_events+=("${xfer_event}")
        sleep 1
    done

    echo "Image transfer initiated, but they will take some time to get transferred."

    for xfer_event in ${xfer_events[@]}; do
        $vps wait_for_image_event "${image_id}" "${xfer_event}"
    done
}

if [[ $# -lt 1 ]]; then
    debug "<command> <params...>"
    exit 1
fi

case $1 in
get_ssh_key_id)
    get_ssh_key_id "${@:2}"
    ;;

create)
    create_droplet "${@:2}"
    ;;

get_id)
    get_droplet_id "${@:2}"
    ;;

get_ip)
    get_droplet_ip "${@:2}"
    ;;

power_on)
    power_on_droplet "${@:2}"
    ;;

power_off)
    power_off_droplet "${@:2}"
    ;;

snapshot)
    snapshot_droplet "${@:2}"
    ;;

destroy)
    destroy_droplet "${@:2}"
    ;;

transfer_image_to_all_regions)
    transfer_image_to_all_regions "${@:2}"
    ;;

*)
    echo "Unknown command $1"
    exit 1
esac
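The command dispatcher at the end of the deleted droplet helper above suggests usage along the following lines. A hypothetical sketch: the script name vps.sh and all values are assumptions for illustration only.

    export DIGITAL_OCEAN_TOKEN="dop_v1_..."          # placeholder API token
    export JSON="$(pwd)/node_modules/.bin/json"

    key_id=$(./vps.sh get_ssh_key_id "id_rsa_yellowtent")
    droplet_id=$(./vps.sh create "${key_id}" "box-snapshot-build")
    ./vps.sh power_off "${droplet_id}"
    image_id=$(./vps.sh snapshot "${droplet_id}" "box-base-image")
    ./vps.sh destroy "${droplet_id}"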
@@ -72,10 +72,11 @@ apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends
 cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades

 echo "==> Installing node.js"
-mkdir -p /usr/local/node-10.18.1
-curl -sL https://nodejs.org/dist/v10.18.1/node-v10.18.1-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-10.18.1
-ln -sf /usr/local/node-10.18.1/bin/node /usr/bin/node
-ln -sf /usr/local/node-10.18.1/bin/npm /usr/bin/npm
+readonly node_version=14.15.4
+mkdir -p /usr/local/node-${node_version}
+curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-${node_version}
+ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
+ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
 apt-get install -y --no-install-recommends python # Install python which is required for npm rebuild
 [[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"

@@ -87,9 +88,10 @@ mkdir -p /etc/systemd/system/docker.service.d
 echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf

 # there are 3 packages for docker - containerd, CLI and the daemon
-curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.13-2_amd64.deb" -o /tmp/containerd.deb
-curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
-curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
+readonly docker_version=20.10.3
+curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.3-1_amd64.deb" -o /tmp/containerd.deb
+curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
+curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
 # apt install with install deps (as opposed to dpkg -i)
 apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
 rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
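After a base-image build with the hunks above, the pinned versions can be sanity-checked; the docker format string below is the same one the installer uses later in this diff:

    node --version                                    # expect v14.15.4
    docker version --format '{{.Client.Version}}'     # expect 20.10.3
    dpkg -s containerd.io | grep ^Version             # expect 1.4.3-1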
migrations/20210204181904-settings-change-ovh-url.js (new file, 10 lines)
@@ -0,0 +1,10 @@
'use strict';

exports.up = function(db, callback) {
    /* this contained an invalid migration of OVH URLs from s3 subdomain to storage subdomain. See https://forum.cloudron.io/topic/4584/issue-with-backups-listings-and-saving-backup-config-in-6-2 */
    callback();
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,16 @@
'use strict';

exports.up = function(db, callback) {
    db.all('SELECT value FROM settings WHERE name="registry_config"', function (error, results) {
        if (error || results.length === 0) return callback(error);

        var registryConfig = JSON.parse(results[0].value);
        if (!registryConfig.provider) registryConfig.provider = 'other';

        db.runSql('UPDATE settings SET value=? WHERE name="registry_config"', [ JSON.stringify(registryConfig) ], callback);
    });
};

exports.down = function(db, callback) {
    callback();
};
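Both migrations run through db-migrate; the invocation below mirrors the one in setup/start.sh further down in this diff (the root password is the stock 'password' used there):

    cd /home/yellowtent/box
    HOME=/home/yellowtent BOX_ENV=cloudron \
        DATABASE_URL=mysql://root:password@127.0.0.1/box \
        ./node_modules/.bin/db-migrate up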
package-lock.json (generated, 2325 lines changed; diff suppressed because it is too large)
package.json (49 lines changed)
@@ -10,15 +10,12 @@
     "type": "git",
     "url": "https://git.cloudron.io/cloudron/box.git"
   },
-  "engines": {
-    "node": ">=4.0.0 <=4.1.1"
-  },
   "dependencies": {
-    "@google-cloud/dns": "^1.2.9",
-    "@google-cloud/storage": "^2.5.0",
+    "@google-cloud/dns": "^2.1.0",
+    "@google-cloud/storage": "^5.8.0",
     "@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
-    "async": "^2.6.3",
-    "aws-sdk": "^2.828.0",
+    "async": "^3.2.0",
+    "aws-sdk": "^2.850.0",
     "basic-auth": "^2.0.1",
     "body-parser": "^1.19.0",
     "cloudron-manifestformat": "^5.10.1",

@@ -31,59 +28,59 @@
     "db-migrate": "^0.11.12",
     "db-migrate-mysql": "^2.1.2",
     "debug": "^4.3.1",
-    "dockerode": "^2.5.8",
-    "ejs": "^2.6.1",
+    "dockerode": "^3.2.1",
+    "ejs": "^3.1.6",
     "ejs-cli": "^2.2.1",
     "express": "^4.17.1",
     "ipaddr.js": "^2.0.0",
-    "js-yaml": "^3.14.0",
-    "json": "^9.0.6",
+    "js-yaml": "^4.0.0",
+    "json": "^10.0.0",
     "jsonwebtoken": "^8.5.1",
-    "ldapjs": "^2.2.3",
-    "lodash": "^4.17.20",
+    "ldapjs": "^2.2.4",
+    "lodash": "^4.17.21",
     "lodash.chunk": "^4.2.0",
-    "mime": "^2.5.0",
+    "mime": "^2.5.2",
     "moment": "^2.29.1",
-    "moment-timezone": "^0.5.32",
+    "moment-timezone": "^0.5.33",
     "morgan": "^1.10.0",
     "multiparty": "^4.2.2",
     "mustache-express": "^1.3.0",
     "mysql": "^2.18.1",
-    "nodemailer": "^6.4.17",
+    "nodemailer": "^6.4.18",
     "nodemailer-smtp-transport": "^2.7.4",
     "once": "^1.4.0",
-    "pretty-bytes": "^5.5.0",
+    "pretty-bytes": "^5.6.0",
     "progress-stream": "^2.0.0",
     "proxy-middleware": "^0.15.0",
     "qrcode": "^1.4.4",
     "readdirp": "^3.5.0",
     "request": "^2.88.2",
-    "rimraf": "^2.6.3",
+    "rimraf": "^3.0.2",
     "s3-block-read-stream": "^0.5.0",
     "safetydance": "^1.1.1",
-    "semver": "^6.1.1",
+    "semver": "^7.3.4",
     "showdown": "^1.9.1",
     "speakeasy": "^2.0.0",
     "split": "^1.0.1",
-    "superagent": "^5.3.1",
+    "superagent": "^6.1.0",
     "supererror": "^0.7.2",
     "tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
    "tar-stream": "^2.2.0",
     "tldjs": "^2.3.1",
     "underscore": "^1.12.0",
-    "uuid": "^3.4.0",
-    "validator": "^11.0.0",
-    "ws": "^7.4.2",
+    "uuid": "^8.3.2",
+    "validator": "^13.5.2",
+    "ws": "^7.4.3",
     "xml2js": "^0.4.23"
   },
   "devDependencies": {
     "expect.js": "*",
     "hock": "^1.4.1",
     "js2xmlparser": "^4.0.1",
-    "mocha": "^6.2.3",
+    "mocha": "^8.3.0",
     "mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
-    "nock": "^10.0.6",
-    "node-sass": "^4.14.1",
+    "nock": "^13.0.7",
+    "node-sass": "^5.0.0",
     "recursive-readdir": "^2.2.2"
   },
   "scripts": {
@@ -2,6 +2,12 @@
 set -eu -o pipefail

+function exitHandler() {
+    rm -f /etc/update-motd.d/91-cloudron-install-in-progress
+}
+
+trap exitHandler EXIT
+
 # change this to a hash when we make a upgrade release
 readonly LOG_FILE="/var/log/cloudron-setup.log"
 readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min

@@ -92,6 +98,26 @@ if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubu
     exit 1
 fi

+# Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
+cat > /etc/update-motd.d/91-cloudron-install-in-progress <<'EOF'
+#!/bin/bash
+
+printf "**********************************************************************\n\n"
+
+printf "\t\t\tWELCOME TO CLOUDRON\n"
+printf "\t\t\t-------------------\n"
+
+printf '\n\e[1;32m%-6s\e[m\n\n' "Cloudron is installing. Run 'tail -f /var/log/cloudron-setup.log' to view progress."
+
+printf "Cloudron overview - https://docs.cloudron.io/ \n"
+printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
+
+printf "\nFor help and more information, visit https://forum.cloudron.io\n\n"
+
+printf "**********************************************************************\n"
+EOF
+chmod +x /etc/update-motd.d/91-cloudron-install-in-progress
+
 # Can only write after we have confirmed script has root access
 echo "Running cloudron-setup with args : $@" > "${LOG_FILE}"

@@ -193,7 +219,7 @@ if [[ "${rebootServer}" == "true" ]]; then
     read -p "The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
     yn=${yn:-y}
     case $yn in
-        [Yy]* ) systemctl reboot;;
+        [Yy]* ) exitHandler; systemctl reboot;;
         * ) exit;;
     esac
 fi
@@ -73,6 +73,9 @@ echo -n "Generating Cloudron Support stats..."
 # clear file
 rm -rf $OUT

+echo -e $LINE"DASHBOARD DOMAIN"$LINE >> $OUT
+mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" &>> $OUT 2>/dev/null || true
+
 echo -e $LINE"PROVIDER"$LINE >> $OUT
 cat /etc/cloudron/PROVIDER &>> $OUT || true

@@ -99,7 +102,7 @@ systemctl status --lines=100 box mysql unbound cloudron-syslog nginx collectd do
 echo -e $LINE"Box logs"$LINE >> $OUT
 tail -n 100 /home/yellowtent/platformdata/logs/box.log &>> $OUT

+echo -e $LINE"Interface Info"$LINE >> $OUT
+ip addr &>> $OUT
+
 echo -e $LINE"Firewall chains"$LINE >> $OUT
@@ -41,8 +41,8 @@ if ! $(cd "${SOURCE_DIR}/../dashboard" && git diff --exit-code >/dev/null); then
     exit 1
 fi

-if [[ "$(node --version)" != "v10.18.1" ]]; then
-    echo "This script requires node 10.18.1"
+if [[ "$(node --version)" != "v14.15.4" ]]; then
+    echo "This script requires node 14.15.4"
     exit 1
 fi
@@ -11,6 +11,10 @@ if [[ ${EUID} -ne 0 ]]; then
     exit 1
 fi

+function log() {
+    echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> installer: $1"
+}
+
 readonly user=yellowtent
 readonly box_src_dir=/home/${user}/box

@@ -21,36 +25,37 @@ readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
 readonly ubuntu_version=$(lsb_release -rs)
 readonly ubuntu_codename=$(lsb_release -cs)

-readonly is_update=$(systemctl is-active box && echo "yes" || echo "no")
+readonly is_update=$(systemctl is-active -q box && echo "yes" || echo "no")

-echo "==> installer: Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION) <=="
+log "Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION)"

-echo "==> installer: updating docker"
+log "updating docker"

-if [[ $(docker version --format {{.Client.Version}}) != "19.03.12" ]]; then
+readonly docker_version=20.10.3
+if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
     # there are 3 packages for docker - containerd, CLI and the daemon
-    curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.13-2_amd64.deb" -o /tmp/containerd.deb
-    curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
-    curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
+    $curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.3-1_amd64.deb" -o /tmp/containerd.deb
+    $curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
+    $curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb

-    echo "==> installer: Waiting for all dpkg tasks to finish..."
+    log "Waiting for all dpkg tasks to finish..."
     while fuser /var/lib/dpkg/lock; do
        sleep 1
    done

    while ! dpkg --force-confold --configure -a; do
-        echo "==> installer: Failed to fix packages. Retry"
+        log "Failed to fix packages. Retry"
        sleep 1
    done

    # the latest docker might need newer packages
    while ! apt update -y; do
-        echo "==> installer: Failed to update packages. Retry"
+        log "Failed to update packages. Retry"
        sleep 1
    done

    while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
-        echo "==> installer: Failed to install docker. Retry"
+        log "Failed to install docker. Retry"
        sleep 1
    done

@@ -59,24 +64,21 @@ fi

 readonly nginx_version=$(nginx -v 2>&1)
 if [[ "${nginx_version}" != *"1.18."* ]]; then
-    echo "==> installer: installing nginx 1.18"
-    curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
+    log "installing nginx 1.18"
+    $curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
     # apt install with install deps (as opposed to dpkg -i)
     apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
     rm /tmp/nginx.deb
 fi

 # Cloudron 6 on ubuntu 20 installed recommended packages of collectd -> libinotify -> gnome->shell
 apt remove -y gnome-shell || true
 apt -y autoremove || true

-echo "==> installer: updating node"
-if [[ "$(node --version)" != "v10.18.1" ]]; then
-    mkdir -p /usr/local/node-10.18.1
-    $curl -sL https://nodejs.org/dist/v10.18.1/node-v10.18.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-10.18.1
-    ln -sf /usr/local/node-10.18.1/bin/node /usr/bin/node
-    ln -sf /usr/local/node-10.18.1/bin/npm /usr/bin/npm
-    rm -rf /usr/local/node-10.15.1
+log "updating node"
+readonly node_version=14.15.4
+if [[ "$(node --version)" != "v${node_version}" ]]; then
+    mkdir -p /usr/local/node-${node_version}
+    $curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-${node_version}
+    ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
+    ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
+    rm -rf /usr/local/node-10.18.1
 fi

 # this is here (and not in updater.js) because rebuild requires the above node

@@ -87,31 +89,31 @@ for try in `seq 1 10`; do
     # however by default npm drops privileges for npm rebuild
     # https://docs.npmjs.com/misc/config#unsafe-perm
     if cd "${box_src_tmp_dir}" && npm rebuild --unsafe-perm; then break; fi
-    echo "==> installer: Failed to rebuild, trying again"
+    log "Failed to rebuild, trying again"
     sleep 5
 done

 if [[ ${try} -eq 10 ]]; then
-    echo "==> installer: npm rebuild failed, giving up"
+    log "npm rebuild failed, giving up"
     exit 4
 fi

-echo "==> installer: downloading new addon images"
+log "downloading new addon images"
 images=$(node -e "var i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")

-echo -e "\tPulling docker images: ${images}"
+log "\tPulling docker images: ${images}"
 for image in ${images}; do
-    if ! docker pull "${image}"; then # this pulls the image using the sha256
-        echo "==> installer: Could not pull ${image}"
-        exit 5
-    fi
-    if ! docker pull "${image%@sha256:*}"; then # this will tag the image for readability
-        echo "==> installer: Could not pull ${image%@sha256:*}"
-        exit 6
-    fi
+    while ! docker pull "${image}"; do # this pulls the image using the sha256
+        log "Could not pull ${image}"
+        sleep 5
+    done
+    while ! docker pull "${image%@sha256:*}"; do # this will tag the image for readability
+        log "Could not pull ${image%@sha256:*}"
+        sleep 5
+    done
 done

-echo "==> installer: update cloudron-syslog"
+log "update cloudron-syslog"
 CLOUDRON_SYSLOG_DIR=/usr/local/cloudron-syslog
 CLOUDRON_SYSLOG="${CLOUDRON_SYSLOG_DIR}/bin/cloudron-syslog"
 CLOUDRON_SYSLOG_VERSION="1.0.3"

@@ -119,7 +121,7 @@ while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLO
     rm -rf "${CLOUDRON_SYSLOG_DIR}"
     mkdir -p "${CLOUDRON_SYSLOG_DIR}"
     if npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
-    echo "===> installer: Failed to install cloudron-syslog, trying again"
+    log "Failed to install cloudron-syslog, trying again"
     sleep 5
 done

@@ -128,17 +130,17 @@ if ! id "${user}" 2>/dev/null; then
 fi

 if [[ "${is_update}" == "yes" ]]; then
-    echo "==> installer: stop box service for update"
+    log "stop box service for update"
     ${box_src_dir}/setup/stop.sh
 fi

 # ensure we are not inside the source directory, which we will remove now
 cd /root

-echo "==> installer: switching the box code"
+log "switching the box code"
 rm -rf "${box_src_dir}"
 mv "${box_src_tmp_dir}" "${box_src_dir}"
 chown -R "${user}:${user}" "${box_src_dir}"

-echo "==> installer: calling box setup script"
+log "calling box setup script"
 "${box_src_dir}/setup/start.sh"
@@ -5,7 +5,11 @@ set -eu -o pipefail
 # This script is run after the box code is switched. This means that this script
 # should pretty much always succeed. No network logic/download code here.

-echo "==> Cloudron Start"
+function log() {
+    echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> start: $1"
+}
+
+log "Cloudron Start"

 readonly USER="yellowtent"
 readonly HOME_DIR="/home/${USER}"

@@ -26,7 +30,7 @@ if ! getent group media; then
     addgroup --gid 500 --system media
 fi

-echo "==> Configuring docker"
+log "Configuring docker"
 cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
 systemctl enable apparmor
 systemctl restart apparmor

@@ -39,7 +43,7 @@ mkdir -p "${BOX_DATA_DIR}"
 mkdir -p "${APPS_DATA_DIR}"

 # keep these in sync with paths.js
-echo "==> Ensuring directories"
+log "Ensuring directories"

 mkdir -p "${PLATFORM_DATA_DIR}/graphite"
 mkdir -p "${PLATFORM_DATA_DIR}/mysql"

@@ -71,7 +75,7 @@ mkdir -p "${BOX_DATA_DIR}/sftp/ssh" # sftp keys
 mkdir -p /var/backups
 chmod 777 /var/backups

-echo "==> Configuring journald"
+log "Configuring journald"
 sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
     -e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
     -i /etc/systemd/journald.conf

@@ -92,7 +96,7 @@ setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal
 # Give user access to nginx logs (uses adm group)
 usermod -a -G adm ${USER}

-echo "==> Setting up unbound"
+log "Setting up unbound"
 # DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
 # We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
 # We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound

@@ -102,7 +106,7 @@ cp -f "${script_dir}/start/unbound.conf" /etc/unbound/unbound.conf.d/cloudron-ne
 # update the root anchor after a out-of-disk-space situation (see #269)
 unbound-anchor -a /var/lib/unbound/root.key

-echo "==> Adding systemd services"
+log "Adding systemd services"
 cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
 [[ "${ubuntu_version}" == "16.04" ]] && sed -e 's/MemoryMax/MemoryLimit/g' -i /etc/systemd/system/box.service
 systemctl daemon-reload

@@ -124,11 +128,11 @@ systemctl restart unbound
 # ensure cloudron-syslog runs
 systemctl restart cloudron-syslog

-echo "==> Configuring sudoers"
+log "Configuring sudoers"
 rm -f /etc/sudoers.d/${USER}
 cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}

-echo "==> Configuring collectd"
+log "Configuring collectd"
 rm -rf /etc/collectd /var/log/collectd.log
 ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
 cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"

@@ -140,7 +144,7 @@ if [[ "${ubuntu_version}" == "20.04" ]]; then
 fi
 systemctl restart collectd

-echo "==> Configuring logrotate"
+log "Configuring logrotate"
 if ! grep -q "^include ${PLATFORM_DATA_DIR}/logrotate.d" /etc/logrotate.conf; then
     echo -e "\ninclude ${PLATFORM_DATA_DIR}/logrotate.d\n" >> /etc/logrotate.conf
 fi

@@ -150,10 +154,10 @@ cp "${script_dir}/start/logrotate/"* "${PLATFORM_DATA_DIR}/logrotate.d/"
 # logrotate files have to be owned by root, this is here to fixup existing installations where we were resetting the owner to yellowtent
 chown root:root "${PLATFORM_DATA_DIR}/logrotate.d/"

-echo "==> Adding motd message for admins"
+log "Adding motd message for admins"
 cp "${script_dir}/start/cloudron-motd" /etc/update-motd.d/92-cloudron

-echo "==> Configuring nginx"
+log "Configuring nginx"
 # link nginx config to system config
 unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
 ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx

@@ -181,18 +185,26 @@ if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf"
     cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
     while true; do
         if ! systemctl list-jobs | grep mysql; then break; fi
-        echo "Waiting for mysql jobs..."
+        log "Waiting for mysql jobs..."
         sleep 1
     done
-    while true; do
-        if systemctl restart mysql; then break; fi
-        echo "Restarting MySql again after sometime since this fails randomly"
+    log "Stopping mysql"
+    systemctl stop mysql
+    while mysqladmin ping 2>/dev/null; do
+        log "Waiting for mysql to stop..."
         sleep 1
     done
-else
-    systemctl start mysql
 fi

+# the start/stop of mysql is separate to make sure it got reloaded with latest config and it's up and running before we start the new box code
+# when using 'system restart mysql', it seems to restart much later and the box code loses connection during platform startup (dangerous!)
+log "Starting mysql"
+systemctl start mysql
+while ! mysqladmin ping 2>/dev/null; do
+    log "Waiting for mysql to start..."
+    sleep 1
+done
+
 readonly mysql_root_password="password"
 mysqladmin -u root -ppassword password password # reset default root password
 if [[ "${ubuntu_version}" == "20.04" ]]; then

@@ -203,17 +215,17 @@ mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
 # set HOME explicity, because it's not set when the installer calls it. this is done because
 # paths.js uses this env var and some of the migrate code requires box code
-echo "==> Migrating data"
+log "Migrating data"
 cd "${BOX_SRC_DIR}"
 if ! HOME=${HOME_DIR} BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up; then
-    echo "DB migration failed"
+    log "DB migration failed"
     exit 1
 fi

 rm -f /etc/cloudron/cloudron.conf

 if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
-    echo "==> Generating dhparams (takes forever)"
+    log "Generating dhparams (takes forever)"
     openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
     cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
 else

@@ -230,7 +242,7 @@ if [[ ! -f "${BOX_DATA_DIR}/sftp/ssh/ssh_host_rsa_key" ]]; then
     fi
 fi

-echo "==> Changing ownership"
+log "Changing ownership"
 # be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
 chown -R "${USER}" /etc/cloudron
 chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update"

@@ -244,9 +256,9 @@ find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${BOX_DATA_DIR}/mail"
 chown "${USER}:${USER}" "${BOX_DATA_DIR}/mail"
 chown "${USER}:${USER}" -R "${BOX_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys

-echo "==> Starting Cloudron"
+log "Starting Cloudron"
 systemctl start box

 sleep 2 # give systemd sometime to start the processes

-echo "==> Almost done"
+log "Almost done"
@@ -20,14 +20,20 @@ fi
 iptables -t filter -A CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
 iptables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443 -j ACCEPT # 202 is the alternate ssh port

-# whitelist any user ports
+# whitelist any user ports. we used to use --dports but it has a 15 port limit (XT_MULTI_PORTS)
 ports_json="/home/yellowtent/boxdata/firewall/ports.json"
 if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(','))" 2>/dev/null); then
-    [[ -n "${allowed_tcp_ports}" ]] && iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports "${allowed_tcp_ports}" -j ACCEPT
+    IFS=',' arr=(${allowed_tcp_ports})
+    for p in "${arr[@]}"; do
+        iptables -A CLOUDRON -p tcp -m tcp --dport "${p}" -j ACCEPT
+    done
 fi

 if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(','))" 2>/dev/null); then
-    [[ -n "${allowed_tcp_ports}" ]] && iptables -A CLOUDRON -p udp -m udp -m multiport --dports "${allowed_tcp_ports}" -j ACCEPT
+    IFS=',' arr=(${allowed_udp_ports})
+    for p in "${arr[@]}"; do
+        iptables -A CLOUDRON -p udp -m udp --dport "${p}" -j ACCEPT
+    done
 fi

 # turn and stun service

@@ -92,3 +98,5 @@ fi
 # Workaround issue where Docker insists on adding itself first in FORWARD table
 iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
 iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
+
+echo "==> Setting up firewall done"
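The 15-port limit that the new comment references is iptables' multiport match cap (XT_MULTI_PORTS); a quick way to see it, with arbitrary example ports:

    # -m multiport accepts at most 15 ports per rule; a 16th makes iptables
    # reject the rule, which is why the loop above adds one rule per port.
    iptables -A CLOUDRON -p tcp -m tcp -m multiport \
        --dports 2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015 \
        -j ACCEPT   # fails with a "too many ports" error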
@@ -1,5 +1,7 @@
 #!/bin/bash

+[[ -f /etc/update-motd.d/91-cloudron-install-in-progress ]] && exit
+
 printf "**********************************************************************\n\n"

 if [[ -z "$(ls -A /home/yellowtent/boxdata/mail/dkim)" ]]; then
@@ -15,6 +15,9 @@ collation-server = utf8mb4_unicode_ci
 # set timezone to UTC
 default_time_zone='+00:00'

+# disable bin logs. they are only useful in replication mode
+skip-log-bin
+
 [mysqldump]
 quick
 quote-names
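To confirm skip-log-bin took effect after MySQL restarts, one quick check (using the stock root password seen elsewhere in this diff):

    mysql -uroot -ppassword -NB -e "SHOW VARIABLES LIKE 'log_bin'"   # expect: log_bin OFF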
@@ -25,9 +25,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurecollec
 Defaults!/home/yellowtent/box/src/scripts/collectlogs.sh env_keep="HOME BOX_ENV"
 yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh

-Defaults!/home/yellowtent/box/src/scripts/retire.sh env_keep="HOME BOX_ENV"
-yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/retire.sh
-
 Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
 yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
@@ -1,8 +1,6 @@
 [Unit]
 Description=Cloudron Admin
 OnFailure=crashnotifier@%n.service
-; journald crashes result in a EPIPE in node. Cannot ignore it as it results in loss of logs.
-BindsTo=systemd-journald.service
 After=mysql.service nginx.service
 ; As cloudron-resize-fs is a one-shot, the Wants= automatically ensures that the service *finishes*
 Wants=cloudron-resize-fs.service
@@ -10,51 +10,48 @@ var appdb = require('./appdb.js'),
     docker = require('./docker.js'),
     eventlog = require('./eventlog.js'),
     safe = require('safetydance'),
-    superagent = require('superagent'),
-    util = require('util');
+    superagent = require('superagent');

 exports = module.exports = {
     run
 };

 const HEALTHCHECK_INTERVAL = 10 * 1000; // every 10 seconds. this needs to be small since the UI makes only healthy apps clickable
-const UNHEALTHY_THRESHOLD = 10 * 60 * 1000; // 10 minutes
+const UNHEALTHY_THRESHOLD = 20 * 60 * 1000; // 20 minutes

 const OOM_EVENT_LIMIT = 60 * 60 * 1000; // 60 minutes
-const gStartTime = new Date(); // time when apphealthmonitor was started
+let gStartTime = null; // time when apphealthmonitor was started
 let gLastOomMailTime = Date.now() - (5 * 60 * 1000); // pretend we sent email 5 minutes ago

-function debugApp(app) {
-    assert(typeof app === 'object');
-
-    debug(app.fqdn + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)) + ' - ' + app.id);
-}
-
 function setHealth(app, health, callback) {
     assert.strictEqual(typeof app, 'object');
     assert.strictEqual(typeof health, 'string');
     assert.strictEqual(typeof callback, 'function');

-    let now = new Date(), curHealth = app.health;
     // app starts out with null health
     // if it became healthy, we update immediately. this is required for ui to say "running" etc
     // if it became unhealthy/error/dead, wait for a threshold before updating db
+    const now = new Date(), lastHealth = app.health;
     let healthTime = gStartTime > app.healthTime ? gStartTime : app.healthTime; // on box restart, clamp value to start time

     if (health === apps.HEALTH_HEALTHY) {
         healthTime = now;
-        if (curHealth && curHealth !== apps.HEALTH_HEALTHY) { // app starts out with null health
-            debugApp(app, 'app switched from %s to healthy', curHealth);
+        if (lastHealth && lastHealth !== apps.HEALTH_HEALTHY) { // app starts out with null health
+            debug(`setHealth: ${app.id} (${app.fqdn}) switched from ${lastHealth} to healthy`);

             // do not send mails for dev apps
             if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, auditSource.HEALTH_MONITOR, { app: app });
         }
     } else if (Math.abs(now - healthTime) > UNHEALTHY_THRESHOLD) {
-        if (curHealth === apps.HEALTH_HEALTHY) {
-            debugApp(app, 'marking as unhealthy since not seen for more than %s minutes', UNHEALTHY_THRESHOLD/(60 * 1000));
+        if (lastHealth === apps.HEALTH_HEALTHY) {
+            debug(`setHealth: marking ${app.id} (${app.fqdn}) as unhealthy since not seen for more than ${UNHEALTHY_THRESHOLD/(60 * 1000)} minutes`);

             // do not send mails for dev apps
             if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, auditSource.HEALTH_MONITOR, { app: app });
         }
     } else {
-        debugApp(app, 'waiting for %s seconds to update the app health', (UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000);
+        debug(`setHealth: ${app.id} (${app.fqdn}) waiting for ${(UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000} to update health`);
         return callback(null);
     }

@@ -63,6 +60,7 @@ function setHealth(app, health, callback) {
         if (error) return callback(error);

         app.health = health;
+        app.healthTime = healthTime;

         callback(null);
     });

@@ -189,6 +187,8 @@ function run(intervalSecs, callback) {
     assert.strictEqual(typeof intervalSecs, 'number');
     assert.strictEqual(typeof callback, 'function');

+    if (!gStartTime) gStartTime = new Date();
+
     async.series([
         processApp, // this is first because docker.getEvents seems to get 'stuck' sometimes
         processDockerEvents.bind(null, intervalSecs)
src/apps.js (27 lines changed)
@@ -182,6 +182,11 @@ function validatePortBindings(portBindings, manifest) {
|
||||
[50000, 51000] /* turn udp ports */
|
||||
];
|
||||
|
||||
const ALLOWED_PORTS = [
|
||||
53, // dns 53 is special and adblocker apps can use them
|
||||
853 // dns over tls
|
||||
];
|
||||
|
||||
if (!portBindings) return null;
|
||||
|
||||
for (let portName in portBindings) {
|
||||
@@ -191,7 +196,7 @@ function validatePortBindings(portBindings, manifest) {
|
||||
if (!Number.isInteger(hostPort)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not an integer`, { field: 'portBindings', portName: portName });
|
||||
if (RESERVED_PORTS.indexOf(hostPort) !== -1) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} is reserved.`, { field: 'portBindings', portName: portName });
|
||||
if (RESERVED_PORT_RANGES.find(range => (hostPort >= range[0] && hostPort <= range[1]))) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} is reserved.`, { field: 'portBindings', portName: portName });
|
||||
-        if (hostPort !== 53 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not in permitted range`, { field: 'portBindings', portName: portName }); // dns 53 is special and adblocker apps can use them
+        if (ALLOWED_PORTS.indexOf(hostPort) === -1 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not in permitted range`, { field: 'portBindings', portName: portName });
     }

     // it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and portBindings. missing values implies

@@ -412,7 +417,7 @@ function removeInternalFields(app) {
 // non-admins can only see these
 function removeRestrictedFields(app) {
     return _.pick(app,
-        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId', 'alternateDomains', 'aliasDomains', 'sso',
+        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction', 'alternateDomains', 'aliasDomains', 'sso',
         'location', 'domain', 'fqdn', 'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'enableBackup');
 }

@@ -749,6 +754,7 @@ function install(data, auditSource, callback) {
     label = data.label || null,
     tags = data.tags || [],
     overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
+    skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false,
     appStoreId = data.appStoreId,
     manifest = data.manifest;

@@ -843,7 +849,7 @@ function install(data, auditSource, callback) {
 }

 const task = {
-    args: { restoreConfig: null, overwriteDns },
+    args: { restoreConfig: null, skipDnsSetup, overwriteDns },
     values: { },
     requiredState: data.installationState
 };

@@ -1245,6 +1251,7 @@ function setLocation(app, data, auditSource, callback) {
 const task = {
     args: {
         oldConfig: _.pick(app, 'location', 'domain', 'fqdn', 'alternateDomains', 'aliasDomains', 'portBindings'),
+        skipDnsSetup: !!data.skipDnsSetup,
         overwriteDns: !!data.overwriteDns
     },
     values

@@ -1450,7 +1457,7 @@ function repair(app, data, auditSource, callback) {
 // maybe split this into a separate route like reinstall?
 if (errorState === exports.ISTATE_PENDING_INSTALL || errorState === exports.ISTATE_PENDING_CLONE) {
-    task.args = { overwriteDns: true };
+    task.args = { skipDnsSetup: false, overwriteDns: true };
     if (data.manifest) {
         let error = manifestFormat.parse(data.manifest);
         if (error) return callback(new BoxError(BoxError.BAD_FIELD, `manifest error: ${error.message}`));

@@ -1523,6 +1530,7 @@ function restore(app, backupId, auditSource, callback) {
     args: {
         restoreConfig,
         oldManifest: app.manifest,
+        skipDnsSetup: !!backupId, // if this is a restore, just skip dns setup. only re-installs should setup dns
         overwriteDns: true
     },
     values

@@ -1578,6 +1586,7 @@ function importApp(app, data, auditSource, callback) {
     args: {
         restoreConfig,
         oldManifest: app.manifest,
+        skipDnsSetup: false,
         overwriteDns: true
     },
     values: {}

@@ -1640,6 +1649,7 @@ function clone(app, data, user, auditSource, callback) {
     portBindings = data.portBindings || null,
     backupId = data.backupId,
     overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
+    skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false,
     appId = app.id;

 assert.strictEqual(typeof backupId, 'string');

@@ -1696,7 +1706,7 @@ function clone(app, data, user, auditSource, callback) {
 const restoreConfig = { backupId: backupId, backupFormat: backupInfo.format };
 const task = {
-    args: { restoreConfig, overwriteDns, oldManifest: null },
+    args: { restoreConfig, overwriteDns, skipDnsSetup, oldManifest: null },
     values: {},
     requiredState: exports.ISTATE_PENDING_CLONE
 };

@@ -1910,8 +1920,6 @@ function autoupdateApps(updateInfo, auditSource, callback) { // updateInfo is {
 assert.strictEqual(typeof auditSource, 'object');
 assert.strictEqual(typeof callback, 'function');

 if (!updateInfo) return callback(null);

 async.eachSeries(Object.keys(updateInfo), function iterator(appId, iteratorDone) {
     get(appId, function (error, app) {
         if (error) {

@@ -1971,7 +1979,8 @@ function listBackups(app, page, perPage, callback) {
     });
 }

-function restoreInstalledApps(callback) {
+function restoreInstalledApps(options, callback) {
+    assert.strictEqual(typeof options, 'object');
     assert.strictEqual(typeof callback, 'function');

     getAll(function (error, apps) {

@@ -1994,7 +2003,7 @@ function restoreInstalledApps(callback) {
 }

 const task = {
-    args: { restoreConfig, overwriteDns: true, oldManifest },
+    args: { restoreConfig, skipDnsSetup: options.skipDnsSetup, overwriteDns: true, oldManifest },
     values: {},
     scheduleNow: false, // task will be scheduled by autoRestartTasks when platform is ready
     requireNullTaskId: false // ignore existing stale taskId
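Taken together, these apps.js hunks thread a new skipDnsSetup flag from the request payload down into the app task args. A minimal sketch of the intended call shape, assuming the payload keys mirror the destructuring above (other required fields such as domain/location are elided here, and the exact route contract is not shown in this diff):

    // hypothetical caller: install an app without touching DNS, e.g. when
    // records are managed externally. apps.install(data, auditSource, callback)
    // matches the signature visible in the hunks above.
    apps.install({
        appStoreId: 'com.example.app',
        manifest: null,
        skipDnsSetup: true,   // new flag: skip registerLocations and the DNS propagation wait
        overwriteDns: false   // when DNS setup does run, still refuse to clobber existing records
    }, { username: 'admin' } /* auditSource */, function (error, result) {
        if (error) return console.error(error);
        console.log('install task started', result);
    });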
127 src/apptask.js

@@ -3,7 +3,7 @@
 'use strict';

 exports = module.exports = {
-    run: run,
+    run,

     // exported for testing
     _configureReverseProxy: configureReverseProxy,

@@ -11,8 +11,6 @@ exports = module.exports = {
     _createAppDir: createAppDir,
     _deleteAppDir: deleteAppDir,
     _verifyManifest: verifyManifest,
-    _registerSubdomains: registerSubdomains,
-    _unregisterSubdomains: unregisterSubdomains,
     _waitForDnsPropagation: waitForDnsPropagation
 };

@@ -333,82 +331,6 @@ function removeIcon(app, callback) {
     callback(null);
 }

-function registerSubdomains(app, overwrite, callback) {
-    assert.strictEqual(typeof app, 'object');
-    assert.strictEqual(typeof overwrite, 'boolean');
-    assert.strictEqual(typeof callback, 'function');
-
-    sysinfo.getServerIp(function (error, ip) {
-        if (error) return callback(error);
-
-        const allDomains = [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains);
-
-        debugApp(app, `registerSubdomain: Will register ${JSON.stringify(allDomains)}`);
-
-        async.eachSeries(allDomains, function (domain, iteratorDone) {
-            async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
-                debugApp(app, 'Registering subdomain: %s%s', domain.subdomain ? (domain.subdomain + '.') : '', domain.domain);
-
-                // get the current record before updating it
-                domains.getDnsRecords(domain.subdomain, domain.domain, 'A', function (error, values) {
-                    if (error && error.reason === BoxError.EXTERNAL_ERROR) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
-                    if (error && error.reason === BoxError.ACCESS_DENIED) return retryCallback(null, new BoxError(BoxError.ACCESS_DENIED, error.message, { domain }));
-                    if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, new BoxError(BoxError.NOT_FOUND, error.message, { domain }));
-                    if (error) return retryCallback(null, new BoxError(BoxError.EXTERNAL_ERROR, error.message, domain)); // give up for other errors
-
-                    if (values.length !== 0 && values[0] === ip) return retryCallback(null); // up-to-date
-
-                    // refuse to update any existing DNS record for custom domains that we did not create
-                    if (values.length !== 0 && !overwrite) return retryCallback(null, new BoxError(BoxError.ALREADY_EXISTS, 'DNS Record already exists', { domain }));
-
-                    domains.upsertDnsRecords(domain.subdomain, domain.domain, 'A', [ ip ], function (error) {
-                        if (error && (error.reason === BoxError.BUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
-                            debugApp(app, 'registerSubdomains: Upsert error. Will retry.', error.message);
-                            return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
-                        }
-
-                        retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, domain) : null);
-                    });
-                });
-            }, function (error, result) {
-                if (error || result) return iteratorDone(error || result);
-
-                iteratorDone(null);
-            });
-        }, callback);
-    });
-}
-
-function unregisterSubdomains(app, allDomains, callback) {
-    assert.strictEqual(typeof app, 'object');
-    assert(Array.isArray(allDomains));
-    assert.strictEqual(typeof callback, 'function');
-
-    sysinfo.getServerIp(function (error, ip) {
-        if (error) return callback(error);
-
-        async.eachSeries(allDomains, function (domain, iteratorDone) {
-            async.retry({ times: 30, interval: 5000 }, function (retryCallback) {
-                debugApp(app, 'Unregistering subdomain: %s%s', domain.subdomain ? (domain.subdomain + '.') : '', domain.domain);
-
-                domains.removeDnsRecords(domain.subdomain, domain.domain, 'A', [ ip ], function (error) {
-                    if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, null);
-                    if (error && (error.reason === BoxError.SBUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
-                        debugApp(app, 'registerSubdomains: Remove error. Will retry.', error.message);
-                        return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
-                    }
-
-                    retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain }) : null);
-                });
-            }, function (error, result) {
-                if (error || result) return iteratorDone(error || result);
-
-                iteratorDone();
-            });
-        }, callback);
-    });
-}

 function waitForDnsPropagation(app, callback) {
     assert.strictEqual(typeof app, 'object');
     assert.strictEqual(typeof callback, 'function');

@@ -492,6 +414,7 @@ function install(app, args, progressCallback, callback) {
 const restoreConfig = args.restoreConfig; // has to be set when restoring
 const overwriteDns = args.overwriteDns;
+const skipDnsSetup = args.skipDnsSetup;
 const oldManifest = args.oldManifest;

 async.series([

@@ -533,8 +456,15 @@ function install(app, args, progressCallback, callback) {
     progressCallback.bind(null, { percent: 20, message: 'Downloading icon' }),
     downloadIcon.bind(null, app),

-    progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
-    registerSubdomains.bind(null, app, overwriteDns),
+    function setupDnsIfNeeded(done) {
+        if (skipDnsSetup) return done();
+
+        async.series([
+            progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
+            domains.registerLocations.bind(null, [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback)
+        ], done);
+    },

     progressCallback.bind(null, { percent: 40, message: 'Downloading image' }),
     downloadImage.bind(null, app.manifest),

@@ -573,8 +503,14 @@ function install(app, args, progressCallback, callback) {
     startApp.bind(null, app),

-    progressCallback.bind(null, { percent: 85, message: 'Waiting for DNS propagation' }),
-    exports._waitForDnsPropagation.bind(null, app),
+    function waitForDns(done) {
+        if (skipDnsSetup) return done();
+
+        async.series([
+            progressCallback.bind(null, { percent: 85, message: 'Waiting for DNS propagation' }),
+            exports._waitForDnsPropagation.bind(null, app),
+        ], done);
+    },

     progressCallback.bind(null, { percent: 95, message: 'Configuring reverse proxy' }),
     configureReverseProxy.bind(null, app),

@@ -652,6 +588,7 @@ function changeLocation(app, args, progressCallback, callback) {
 const oldConfig = args.oldConfig;
 const locationChanged = oldConfig.fqdn !== app.fqdn;
+const skipDnsSetup = args.skipDnsSetup;
 const overwriteDns = args.overwriteDns;

 async.series([

@@ -673,11 +610,17 @@ function changeLocation(app, args, progressCallback, callback) {
     if (obsoleteDomains.length === 0) return next();

-    unregisterSubdomains(app, obsoleteDomains, next);
+    domains.unregisterLocations(obsoleteDomains, progressCallback, next);
 },

-progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
-registerSubdomains.bind(null, app, overwriteDns),
+function setupDnsIfNeeded(done) {
+    if (skipDnsSetup) return done();
+
+    async.series([
+        progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
+        domains.registerLocations.bind(null, [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback)
+    ], done);
+},

 // re-setup addons since they rely on the app's fqdn (e.g oauth)
 progressCallback.bind(null, { percent: 50, message: 'Setting up addons' }),

@@ -688,8 +631,14 @@ function changeLocation(app, args, progressCallback, callback) {
     startApp.bind(null, app),

-    progressCallback.bind(null, { percent: 80, message: 'Waiting for DNS propagation' }),
-    exports._waitForDnsPropagation.bind(null, app),
+    function waitForDns(done) {
+        if (skipDnsSetup) return done();
+
+        async.series([
+            progressCallback.bind(null, { percent: 80, message: 'Waiting for DNS propagation' }),
+            exports._waitForDnsPropagation.bind(null, app),
+        ], done);
+    },

     progressCallback.bind(null, { percent: 90, message: 'Configuring reverse proxy' }),
     configureReverseProxy.bind(null, app),

@@ -1003,7 +952,7 @@ function uninstall(app, args, progressCallback, callback) {
     docker.deleteImage.bind(null, app.manifest),

     progressCallback.bind(null, { percent: 70, message: 'Unregistering domains' }),
-    unregisterSubdomains.bind(null, app, [ { subdomain: app.location, domain: app.domain } ].concat(app.alternateDomains).concat(app.aliasDomains)),
+    domains.unregisterLocations.bind(null, [ { subdomain: app.location, domain: app.domain } ].concat(app.alternateDomains).concat(app.aliasDomains), progressCallback),

     progressCallback.bind(null, { percent: 80, message: 'Cleanup icon' }),
     removeIcon.bind(null, app),

@@ -52,7 +52,7 @@ function scheduleTask(appId, taskId, options, callback) {
 if (Object.keys(gActiveTasks).length >= TASK_CONCURRENCY) {
     debug(`Reached concurrency limit, queueing task id ${taskId}`);
     tasks.update(taskId, { percent: 1, message: 'Waiting for other app tasks to complete' }, NOOP_CALLBACK);
-    gPendingTasks.push({ appId, taskId, callback });
+    gPendingTasks.push({ appId, taskId, options, callback });
     return;
 }

@@ -61,7 +61,7 @@ function scheduleTask(appId, taskId, options, callback) {
 if (lockError) {
     debug(`Could not get lock. ${lockError.message}, queueing task id ${taskId}`);
     tasks.update(taskId, { percent: 1, message: waitText(lockError.operation) }, NOOP_CALLBACK);
-    gPendingTasks.push({ appId, taskId, callback });
+    gPendingTasks.push({ appId, taskId, options, callback });
     return;
 }

@@ -91,6 +91,6 @@ function startNextTask() {
 assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);

 const t = gPendingTasks.shift();
-scheduleTask(t.appId, t.taskId, t.callback);
+scheduleTask(t.appId, t.taskId, t.options, t.callback);
 }
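The repeated setupDnsIfNeeded/waitForDns wrappers above all follow one pattern: an async.series step that short-circuits when skipDnsSetup is set. A standalone sketch of that pattern (the helper name is illustrative, not from the source):

    const async = require('async');

    // returns an async.series-compatible step that runs 'steps' unless skipped
    function makeOptionalStep(skip, steps) {
        return function optionalStep(done) {
            if (skip) return done();
            async.series(steps, done);
        };
    }

    // usage: the DNS steps collapse to a no-op when skipDnsSetup is true
    async.series([
        makeOptionalStep(true /* skipDnsSetup */, [
            (next) => { console.log('registering locations'); next(); },
            (next) => { console.log('waiting for propagation'); next(); }
        ])
    ], (error) => console.log('done', error));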
29 src/autoconfig.xml.ejs (new file)

@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<clientConfig version="1.1">
+    <emailProvider id="<%= domain %>">
+        <domain><%= domain %></domain>
+        <displayName>Cloudron Mail</displayName>
+        <displayShortName>Cloudron</displayShortName>
+        <incomingServer type="imap">
+            <hostname><%= mailFqdn %></hostname>
+            <port>993</port>
+            <socketType>SSL</socketType>
+            <authentication>password-cleartext</authentication>
+            <username>%EMAILADDRESS%</username>
+        </incomingServer>
+        <outgoingServer type="smtp">
+            <hostname><%= mailFqdn %></hostname>
+            <port>587</port>
+            <socketType>STARTTLS</socketType>
+            <authentication>password-cleartext</authentication>
+            <username>%EMAILADDRESS%</username>
+            <addThisServer>true</addThisServer>
+        </outgoingServer>
+
+        <documentation url="http://cloudron.io/email/#autodiscover">
+            <descr lang="en">Cloudron Email</descr>
+        </documentation>
+
+    </emailProvider>
+</clientConfig>
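This new template implements Thunderbird-style email autoconfig (the "Email autoconfig" item in the 6.1.2 changelog). A minimal sketch of how such a template could be rendered and served; the route and handler are assumptions, only the domain and mailFqdn variables are confirmed by the template itself:

    const ejs = require('ejs'), fs = require('fs');

    // hypothetical handler: serve the rendered XML at the autoconfig well-known URL
    function serveAutoconfig(req, res) {
        const template = fs.readFileSync(__dirname + '/autoconfig.xml.ejs', 'utf8');
        const xml = ejs.render(template, { domain: 'example.com', mailFqdn: 'my.example.com' });
        res.set('Content-Type', 'application/xml').send(xml);
    }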
@@ -18,6 +18,7 @@ exports = module.exports = {
     get,
     del,
     update,
+    list,

     _clear: clear
 };

@@ -80,6 +81,21 @@ function getByIdentifierPaged(identifier, page, perPage, callback) {
     });
 }

+function list(page, perPage, callback) {
+    assert(typeof page === 'number' && page > 0);
+    assert(typeof perPage === 'number' && perPage > 0);
+    assert.strictEqual(typeof callback, 'function');
+
+    database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups ORDER BY creationTime DESC LIMIT ?,?',
+        [ (page-1)*perPage, perPage ], function (error, results) {
+            if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
+
+            results.forEach(function (result) { postProcess(result); });
+
+            callback(null, results);
+        });
+}

 function get(id, callback) {
     assert.strictEqual(typeof id, 'string');
     assert.strictEqual(typeof callback, 'function');
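The new list() pages with a 1-based page index, translating it to a SQL offset of (page-1)*perPage; page 2 with perPage 50 therefore reads rows 50-99, newest first. A small usage sketch:

    // fetch the second page of 50 backups, newest first
    backupdb.list(2, 50, function (error, backups) {
        if (error) return console.error(error);
        // backups[0] is the 51st newest backup record
        backups.forEach(b => console.log(b.id, b.creationTime));
    });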
108 src/backups.js

@@ -108,6 +108,7 @@ function api(provider) {
     case 'backblaze-b2': return require('./storage/s3.js');
     case 'linode-objectstorage': return require('./storage/s3.js');
     case 'ovh-objectstorage': return require('./storage/s3.js');
+    case 'ionos-objectstorage': return require('./storage/s3.js');
     case 'noop': return require('./storage/noop.js');
     default: return null;
 }

@@ -549,21 +550,29 @@ function saveFsMetadata(dataLayout, metadataFile, callback) {
 // contains paths prefixed with './'
 let metadata = {
     emptyDirs: [],
-    execFiles: []
+    execFiles: [],
+    symlinks: []
 };

 // we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
 for (let lp of dataLayout.localPaths()) {
-    var emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty\n`, { encoding: 'utf8' });
-    if (emptyDirs === null) return callback(safe.error);
+    const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
+    if (emptyDirs === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`));
     if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));

-    var execFiles = safe.child_process.execSync(`find ${lp} -type f -executable\n`, { encoding: 'utf8' });
-    if (execFiles === null) return callback(safe.error);
-
+    const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
+    if (execFiles === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`));
     if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
+
+    const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
+    if (symlinks === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`));
+    if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
+        const target = safe.fs.readlinkSync(sl);
+        return { path: dataLayout.toRemotePath(sl), target };
+    }));
 }

-if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) return callback(safe.error);
+if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) return callback(new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`));

 callback();
 }
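With the symlinks addition, the metadata file now records three path classes. A plausible fsmetadata.json produced by this code, with invented paths for illustration:

    {
        "emptyDirs": [ "./data/cache" ],
        "execFiles": [ "./data/bin/run.sh" ],
        "symlinks": [ { "path": "./data/current", "target": "releases/v2" } ]
    }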
@@ -691,7 +700,19 @@ function restoreFsMetadata(dataLayout, metadataFile, callback) {
 }, function (error) {
     if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `unable to chmod: ${error.message}`));

-    callback();
+    async.eachSeries(metadata.symlinks || [], function createSymlink(symlink, iteratorDone) {
+        if (!symlink.target) return iteratorDone();
+        // the path may not exist if we had a directory full of symlinks
+        fs.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }, function (error) {
+            if (error) return iteratorDone(error);
+
+            fs.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file', iteratorDone);
+        });
+    }, function (error) {
+        if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `unable to symlink: ${error.message}`));
+
+        callback();
+    });
 });
 });
 }

@@ -805,7 +826,9 @@ function restore(backupConfig, backupId, progressCallback, callback) {
 assert.strictEqual(typeof progressCallback, 'function');
 assert.strictEqual(typeof callback, 'function');

-const dataLayout = new DataLayout(paths.BOX_DATA_DIR, []);
+const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
+if (!boxDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`));
+const dataLayout = new DataLayout(boxDataDir, []);

 download(backupConfig, backupId, backupConfig.format, dataLayout, progressCallback, function (error) {
     if (error) return callback(error);

@@ -829,7 +852,7 @@ function downloadApp(app, restoreConfig, progressCallback, callback) {
 assert.strictEqual(typeof callback, 'function');

 const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
-if (!appDataDir) return callback(safe.error);
+if (!appDataDir) return callback(new BoxError(BoxError.FS_ERROR, safe.error.message));
 const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);

 const startTime = new Date();

@@ -932,7 +955,7 @@ function uploadBoxSnapshot(backupConfig, progressCallback, callback) {
 if (error) return callback(error);

 const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
-if (!boxDataDir) return callback(safe.error);
+if (!boxDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`));

 const uploadConfig = {
     backupId: 'snapshot/box',

@@ -1115,7 +1138,7 @@ function uploadAppSnapshot(backupConfig, app, progressCallback, callback) {

 const backupId = util.format('snapshot/app_%s', app.id);
 const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
-if (!appDataDir) return callback(safe.error);
+if (!appDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`));

 const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);

@@ -1323,7 +1346,7 @@ function cleanupBackup(backupConfig, backup, progressCallback, callback) {
 assert.strictEqual(typeof progressCallback, 'function');
 assert.strictEqual(typeof callback, 'function');

-var backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);
+const backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);

 function done(error) {
     if (error) {

@@ -1427,6 +1450,46 @@ function cleanupBoxBackups(backupConfig, progressCallback, callback) {
     });
 }

+function cleanupMissingBackups(backupConfig, progressCallback, callback) {
+    assert.strictEqual(typeof backupConfig, 'object');
+    assert.strictEqual(typeof progressCallback, 'function');
+    assert.strictEqual(typeof callback, 'function');
+
+    let page = 1, perPage = 1000, more = false, missingBackupIds = [];
+
+    async.doWhilst(function (whilstCallback) {
+        backupdb.list(page, perPage, function (error, result) {
+            if (error) return whilstCallback(error);
+
+            async.eachSeries(result, function (backup, next) {
+                let backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);
+                if (backup.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory
+
+                api(backupConfig.provider).exists(backupConfig, backupFilePath, function (error, exists) {
+                    if (error || exists) return next();
+
+                    progressCallback({ message: `Removing missing backup ${backup.id}`});
+
+                    backupdb.del(backup.id, function (error) {
+                        if (error) debug(`cleanupBackup: error removing ${backup.id} from database`, error);
+
+                        missingBackupIds.push(backup.id);
+
+                        next();
+                    });
+                });
+            }, function () {
+                more = result.length === perPage;
+                whilstCallback();
+            });
+        });
+    }, function (testDone) { return testDone(null, more); }, function (error) {
+        if (error) return callback(error);
+
+        return callback(null, missingBackupIds);
+    });
+}

 function cleanupCacheFilesSync() {
     var files = safe.fs.readdirSync(path.join(paths.BACKUP_INFO_DIR));
     if (!files) return;
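cleanupMissingBackups pages through the backup table and drops rows whose artifact no longer exists in storage; note the rsync special case, where a trailing slash marks a directory for the provider's exists() check. A compact sketch of the paging loop in isolation (the db and provider stubs are invented, and the sketch advances the page explicitly for clarity):

    const async = require('async');

    // stand-ins for backupdb.list and the storage provider's exists()
    function listPage(page, perPage, cb) { cb(null, page === 1 ? [ { id: 'b1', format: 'tgz' } ] : []); }
    function existsInStorage(filePath, cb) { cb(null, false); }

    let page = 1, perPage = 1000, more = false, missing = [];
    async.doWhilst(function (whilstCallback) {
        listPage(page, perPage, function (error, result) {
            if (error) return whilstCallback(error);
            async.eachSeries(result, function (backup, next) {
                existsInStorage(backup.id, function (error, exists) {
                    if (error || exists) return next();
                    missing.push(backup.id); // the real code also deletes the db row here
                    next();
                });
            }, function () {
                more = result.length === perPage;
                page += 1; // simplification for the sketch
                whilstCallback();
            });
        });
    }, (testDone) => testDone(null, more), (error) => console.log(error || missing));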
@@ -1498,12 +1561,18 @@ function cleanup(progressCallback, callback) {
 cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback, function (error, removedAppBackupIds) {
     if (error) return callback(error);

-    progressCallback({ percent: 90, message: 'Cleaning snapshots' });
+    progressCallback({ percent: 70, message: 'Cleaning missing backups' });

-    cleanupSnapshots(backupConfig, function (error) {
+    cleanupMissingBackups(backupConfig, progressCallback, function (error, missingBackupIds) {
         if (error) return callback(error);

-        callback(null, { removedBoxBackupIds, removedAppBackupIds });
+        progressCallback({ percent: 90, message: 'Cleaning snapshots' });
+
+        cleanupSnapshots(backupConfig, function (error) {
+            if (error) return callback(error);
+
+            callback(null, { removedBoxBackupIds, removedAppBackupIds, missingBackupIds });
+        });
     });
 });
 });
 });

@@ -1515,12 +1584,13 @@ function startCleanupTask(auditSource, callback) {
 tasks.add(tasks.TASK_CLEAN_BACKUPS, [], function (error, taskId) {
     if (error) return callback(error);

-    tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackups, removedAppBackups }
+    tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackupIds, removedAppBackupIds, missingBackupIds }
         eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
             taskId,
             errorMessage: error ? error.message : null,
-            removedBoxBackups: result ? result.removedBoxBackups : [],
-            removedAppBackups: result ? result.removedAppBackups : []
+            removedBoxBackupIds: result ? result.removedBoxBackupIds : [],
+            removedAppBackupIds: result ? result.removedAppBackupIds : [],
+            missingBackupIds: result ? result.missingBackupIds : []
         });
     });
@@ -17,6 +17,7 @@ exports = module.exports = {
     setDashboardDomain,
     updateDashboardDomain,
     renewCerts,
+    syncDnsRecords,

     runSystemChecks
 };

@@ -72,14 +73,15 @@ function uninitialize(callback) {
     ], callback);
 }

-function onActivated(callback) {
+function onActivated(options, callback) {
+    assert.strictEqual(typeof options, 'object');
     assert.strictEqual(typeof callback, 'function');

     // Starting the platform after a user is available means:
     // 1. mail bounces can now be sent to the cloudron owner
     // 2. the restore code path can run without sudo (since mail/ is non-root)
     async.series([
-        platform.start,
+        platform.start.bind(null, options),
         cron.startJobs,
         function checkBackupConfiguration(done) {
             backups.checkConfiguration(function (error, message) {

@@ -147,7 +149,7 @@ function runStartupTasks() {
     return reverseProxy.writeDefaultConfig({ activated: false }, callback);
 }

-onActivated(callback);
+onActivated({}, callback);
 });
 }
 ];

@@ -400,3 +402,16 @@ function setupDnsAndCert(subdomain, domain, auditSource, progressCallback, callb
     });
 });
 }

+function syncDnsRecords(options, callback) {
+    assert.strictEqual(typeof options, 'object');
+    assert.strictEqual(typeof callback, 'function');
+
+    tasks.add(tasks.TASK_SYNC_DNS_RECORDS, [ options ], function (error, taskId) {
+        if (error) return callback(error);
+
+        tasks.startTask(taskId, {}, NOOP_CALLBACK);
+
+        callback(null, taskId);
+    });
+}
14 src/cron.js

@@ -33,9 +33,10 @@ var appHealthMonitor = require('./apphealthmonitor.js'),
     settings = require('./settings.js'),
     system = require('./system.js'),
     updater = require('./updater.js'),
-    updateChecker = require('./updatechecker.js');
+    updateChecker = require('./updatechecker.js'),
+    _ = require('underscore');

-var gJobs = {
+const gJobs = {
     autoUpdater: null,
     backup: null,
     updateChecker: null,

@@ -51,7 +52,7 @@ var gJobs = {
     appHealthMonitor: null
 };

-var NOOP_CALLBACK = function (error) { if (error) debug(error); };
+const NOOP_CALLBACK = function (error) { if (error) debug(error); };

 // cron format
 // Seconds: 0-59

@@ -198,9 +199,10 @@ function autoupdatePatternChanged(pattern, tz) {
     return;
 }

-if (updateInfo.apps && Object.keys(updateInfo.apps).length > 0) {
-    debug('Starting app update to %j', updateInfo.apps);
-    apps.autoupdateApps(updateInfo.apps, auditSource.CRON, NOOP_CALLBACK);
+const appUpdateInfo = _.omit(updateInfo, 'box');
+if (Object.keys(appUpdateInfo).length > 0) {
+    debug('Starting app update to %j', appUpdateInfo);
+    apps.autoupdateApps(appUpdateInfo, auditSource.CRON, NOOP_CALLBACK);
 } else {
     debug('No app auto updates available');
 }
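This hunk implies the update-info map changed shape: app updates now sit directly alongside a 'box' entry, keyed by app id, so the cron job strips 'box' with underscore's omit rather than reading a nested apps object. A tiny illustration with invented keys:

    const _ = require('underscore');

    const updateInfo = { box: { version: '6.2.0' }, 'app-id-1': { manifest: {} } };
    const appUpdateInfo = _.omit(updateInfo, 'box');
    // => { 'app-id-1': { manifest: {} } } — only per-app entries remain
    console.log(Object.keys(appUpdateInfo).length > 0); // true: app updates pending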
@@ -1,13 +1,13 @@
 'use strict';

 exports = module.exports = {
-    removePrivateFields: removePrivateFields,
-    injectPrivateFields: injectPrivateFields,
-    upsert: upsert,
-    get: get,
-    del: del,
-    wait: wait,
-    verifyDnsConfig: verifyDnsConfig
+    removePrivateFields,
+    injectPrivateFields,
+    upsert,
+    get,
+    del,
+    wait,
+    verifyDnsConfig
 };

 var assert = require('assert'),

@@ -69,7 +69,7 @@ function getInternal(dnsConfig, zoneName, name, type, callback) {

     iteratorDone();
 });
-}, function () { return !!nextPage; }, function (error) {
+}, function (testDone) { return testDone(null, !!nextPage); }, function (error) {
     debug('getInternal:', error, JSON.stringify(matchingRecords));

     if (error) return callback(error);
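These truth-test changes across the DNS backends (and the whilst call in mail.js further down) track the async v3 API: whilst/doWhilst now pass the test function a callback instead of reading its return value. A side-by-side sketch:

    const async = require('async');

    let count = 0;

    // async v2 style (removed): synchronous truth test
    // async.doWhilst(iter, () => count < 3, done);

    // async v3 style (added): the test receives an errback
    async.doWhilst(
        (next) => { count += 1; next(); },
        (testDone) => testDone(null, count < 3),
        (error) => console.log('stopped at', count, error)
    );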
@@ -1,13 +1,13 @@
 'use strict';

 exports = module.exports = {
-    removePrivateFields: removePrivateFields,
-    injectPrivateFields: injectPrivateFields,
-    upsert: upsert,
-    get: get,
-    del: del,
-    wait: wait,
-    verifyDnsConfig: verifyDnsConfig
+    removePrivateFields,
+    injectPrivateFields,
+    upsert,
+    get,
+    del,
+    wait,
+    verifyDnsConfig
 };

 let async = require('async'),

@@ -99,7 +99,7 @@ function getZoneRecords(dnsConfig, zoneName, name, type, callback) {

     iteratorDone();
 });
-}, function () { return more; }, function (error) {
+}, function (testDone) { return testDone(null, more); }, function (error) {
     debug('getZoneRecords:', error, JSON.stringify(records));

     if (error) return callback(error);
110 src/docker.js

@@ -43,6 +43,7 @@ const apps = require('./apps.js'),
     Docker = require('dockerode'),
     os = require('os'),
     path = require('path'),
+    reverseProxy = require('./reverseproxy.js'),
     services = require('./services.js'),
     settings = require('./settings.js'),
     shell = require('./shell.js'),

@@ -58,11 +59,13 @@ const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh'),
 const DOCKER_SOCKET_PATH = '/var/run/docker.sock';
 const gConnection = new Docker({ socketPath: DOCKER_SOCKET_PATH });

-function testRegistryConfig(auth, callback) {
-    assert.strictEqual(typeof auth, 'object');
+function testRegistryConfig(config, callback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof callback, 'function');

-    gConnection.checkAuth(auth, function (error /*, data */) { // this returns a 500 even for auth errors
+    if (config.provider === 'noop') return callback();
+
+    gConnection.checkAuth(config, function (error /*, data */) { // this returns a 500 even for auth errors
         if (error) return callback(new BoxError(BoxError.BAD_FIELD, error, { field: 'serverAddress' }));

         callback();

@@ -81,14 +84,14 @@ function removePrivateFields(registryConfig) {
     return registryConfig;
 }

-function setRegistryConfig(auth, callback) {
-    assert.strictEqual(typeof auth, 'object');
+function setRegistryConfig(config, callback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof callback, 'function');

-    const isLogin = !!auth.password;
+    const isLogin = !!config.password;

     // currently, auth info is not stashed in the db but maybe it should for restore to work?
-    const cmd = isLogin ? `docker login ${auth.serverAddress} --username ${auth.username} --password ${auth.password}` : `docker logout ${auth.serverAddress}`;
+    const cmd = isLogin ? `docker login ${config.serverAddress} --username ${config.username} --password ${config.password}` : `docker logout ${config.serverAddress}`;

     child_process.exec(cmd, { }, function (error /*, stdout, stderr */) {
         if (error) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));

@@ -134,12 +137,12 @@ function getRegistryConfig(image, callback) {
 }

 function pullImage(manifest, callback) {
-    getRegistryConfig(manifest.dockerImage, function (error, authConfig) {
+    getRegistryConfig(manifest.dockerImage, function (error, config) {
         if (error) return callback(error);

-        debug(`pullImage: will pull ${manifest.dockerImage}. auth: ${authConfig ? 'yes' : 'no'}`);
+        debug(`pullImage: will pull ${manifest.dockerImage}. auth: ${config ? 'yes' : 'no'}`);

-        gConnection.pull(manifest.dockerImage, { authconfig: authConfig }, function (error, stream) {
+        gConnection.pull(manifest.dockerImage, { authconfig: config }, function (error, stream) {
             if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND, `Unable to pull image ${manifest.dockerImage}. message: ${error.message} statusCode: ${error.statusCode}`));
             if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${manifest.dockerImage}. Please check the network or if the image needs authentication. statusCode: ${error.statusCode}`));

@@ -191,25 +194,97 @@ function downloadImage(manifest, callback) {
     });
 }

-function getBinds(app, callback) {
+function getVolumeMounts(app, callback) {
     assert.strictEqual(typeof app, 'object');
     assert.strictEqual(typeof callback, 'function');

-    if (app.mounts.length === 0) return callback(null);
-
-    let binds = [];
+    let mounts = [];
+
+    if (app.mounts.length === 0) return callback(null, []);

     volumes.list(function (error, result) {
         if (error) return callback(error);

         let volumesById = {};
         result.forEach(r => volumesById[r.id] = r);

         for (const mount of app.mounts) {
             const volume = volumesById[mount.volumeId];
-            binds.push(`${volume.hostPath}:/media/${volume.name}:${mount.readOnly ? 'ro' : 'rw'}`);
+
+            mounts.push({
+                Source: volume.hostPath,
+                Target: `/media/${volume.name}`,
+                Type: 'bind',
+                ReadOnly: mount.readOnly
+            });
         }

-        callback(null, binds);
+        callback(null, mounts);
     });
 }

+function getAddonMounts(app, callback) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof callback, 'function');
+
+    let mounts = [];
+
+    const addons = app.manifest.addons;
+    if (!addons) return callback(null, mounts);
+
+    async.eachSeries(Object.keys(addons), function (addon, iteratorDone) {
+        switch (addon) {
+        case 'localstorage':
+            mounts.push({
+                Target: '/app/data',
+                Source: `${app.id}-localstorage`,
+                Type: 'volume',
+                ReadOnly: false
+            });
+
+            return iteratorDone();
+        case 'tls':
+            reverseProxy.getCertificate(app.fqdn, app.domain, function (error, bundle) {
+                if (error) return iteratorDone(error);
+
+                mounts.push({
+                    Target: '/etc/certs/tls_cert.pem',
+                    Source: bundle.certFilePath,
+                    Type: 'bind',
+                    ReadOnly: true
+                });
+
+                mounts.push({
+                    Target: '/etc/certs/tls_key.pem',
+                    Source: bundle.keyFilePath,
+                    Type: 'bind',
+                    ReadOnly: true
+                });
+
+                iteratorDone();
+            });
+
+            return;
+        default:
+            iteratorDone();
+        }
+    }, function (error) {
+        callback(error, mounts);
+    });
+}
+
+function getMounts(app, callback) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof callback, 'function');
+
+    getVolumeMounts(app, function (error, volumeMounts) {
+        if (error) return callback(error);
+
+        getAddonMounts(app, function (error, addonMounts) {
+            if (error) return callback(error);
+
+            callback(null, volumeMounts.concat(addonMounts));
+        });
+    });
+}
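The switch from Binds strings to structured Mounts matches the Docker API's HostConfig.Mounts field, which also powers the new tls addon (certificate and key bind-mounted read-only into the container). For comparison, the same attachment in both notations, with illustrative paths:

    // old Binds notation: one colon-separated string per attachment
    const bind = '/mnt/media:/media/media:ro';

    // new Mounts notation: one object per attachment, as dockerode/Docker expect
    const mount = { Source: '/mnt/media', Target: '/media/media', Type: 'bind', ReadOnly: true };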
@@ -278,7 +353,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
 services.getEnvironment(app, function (error, addonEnv) {
     if (error) return callback(error);

-    getBinds(app, function (error, binds) {
+    getMounts(app, function (error, mounts) {
         if (error) return callback(error);

         let containerOptions = {

@@ -299,8 +374,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
     'isCloudronManaged': String(true)
 },
 HostConfig: {
-    Mounts: services.getMountsSync(app, app.manifest.addons),
-    Binds: binds, // ideally, we have to use 'Mounts' but we have to create volumes then
+    Mounts: mounts,
     LogConfig: {
         Type: 'syslog',
         Config: {
143 src/domains.js

@@ -26,12 +26,19 @@ module.exports = exports = {

     parentDomain,

-    checkDnsRecords
+    registerLocations,
+    unregisterLocations,
+
+    checkDnsRecords,
+    syncDnsRecords
 };

-var assert = require('assert'),
+const apps = require('./apps.js'),
+    assert = require('assert'),
     async = require('async'),
     BoxError = require('./boxerror.js'),
     constants = require('./constants.js'),
+    crypto = require('crypto'),
     debug = require('debug')('box:domains'),
     domaindb = require('./domaindb.js'),
     eventlog = require('./eventlog.js'),

@@ -191,7 +198,11 @@ function add(domain, data, auditSource, callback) {
 let error = validateTlsConfig(tlsConfig, provider);
 if (error) return callback(error);

-if (!dkimSelector) dkimSelector = 'cloudron-' + settings.adminDomain().replace(/\./g, '');
+if (!dkimSelector) {
+    // create a unique suffix so this domain can be added on another cloudron instance without a dkim selector conflict
+    const suffix = crypto.createHash('sha256').update(settings.adminDomain()).digest('hex').substr(0, 6);
+    dkimSelector = `cloudron-${suffix}`;
+}

 verifyDnsConfig(config, domain, zoneName, provider, function (error, sanitizedConfig) {
     if (error) return callback(error);
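The selector derivation is deterministic per dashboard domain: sha256 of the admin domain, truncated to six hex characters. A quick way to reproduce it (admin domain here is an assumption):

    const crypto = require('crypto');

    const suffix = crypto.createHash('sha256').update('my.example.com').digest('hex').substr(0, 6);
    // prints 'cloudron-' followed by the first 6 hex chars of sha256('my.example.com'),
    // stable across runs, unique per admin domain
    console.log(`cloudron-${suffix}`);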
@@ -313,7 +324,7 @@ function del(domain, auditSource, callback) {
 assert.strictEqual(typeof callback, 'function');

 if (domain === settings.adminDomain()) return callback(new BoxError(BoxError.CONFLICT, 'Cannot remove admin domain'));
-if (domain === settings.mailDomain()) return callback(new BoxError(BoxError.CONFLICT, 'Cannot remove mail domain'));
+if (domain === settings.mailDomain()) return callback(new BoxError(BoxError.CONFLICT, 'Cannot remove mail domain. Change the mail server location first'));

 domaindb.del(domain, function (error) {
     if (error) return callback(error);

@@ -337,6 +348,7 @@ function clear(callback) {
 }

 // returns the 'name' that needs to be inserted into zone
+// eslint-disable-next-line no-unused-vars
 function getName(domain, location, type) {
     const part = domain.domain.slice(0, -domain.zoneName.length - 1);

@@ -463,3 +475,126 @@ function makeWildcard(vhost) {
     parts[0] = '*';
     return parts.join('.');
 }

+function registerLocations(locations, options, progressCallback, callback) {
+    assert(Array.isArray(locations));
+    assert.strictEqual(typeof options, 'object');
+    assert.strictEqual(typeof progressCallback, 'function');
+    assert.strictEqual(typeof callback, 'function');
+
+    debug(`registerLocations: Will register ${JSON.stringify(locations)} with options ${JSON.stringify(options)}`);
+
+    const overwriteDns = options.overwriteDns || false;
+
+    sysinfo.getServerIp(function (error, ip) {
+        if (error) return callback(error);
+
+        async.eachSeries(locations, function (location, iteratorDone) {
+            async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
+                progressCallback({ message: `Registering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
+
+                // get the current record before updating it
+                getDnsRecords(location.subdomain, location.domain, 'A', function (error, values) {
+                    if (error && error.reason === BoxError.EXTERNAL_ERROR) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location })); // try again
+                    if (error && error.reason === BoxError.ACCESS_DENIED) return retryCallback(null, new BoxError(BoxError.ACCESS_DENIED, error.message, { domain: location }));
+                    if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, new BoxError(BoxError.NOT_FOUND, error.message, { domain: location }));
+                    if (error) return retryCallback(null, new BoxError(BoxError.EXTERNAL_ERROR, error.message, location)); // give up for other errors
+
+                    if (values.length !== 0 && values[0] === ip) return retryCallback(null); // up-to-date
+
+                    // refuse to update any existing DNS record for custom domains that we did not create
+                    if (values.length !== 0 && !overwriteDns) return retryCallback(null, new BoxError(BoxError.ALREADY_EXISTS, 'DNS Record already exists', { domain: location }));
+
+                    upsertDnsRecords(location.subdomain, location.domain, 'A', [ ip ], function (error) {
+                        if (error && (error.reason === BoxError.BUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
+                            progressCallback({ message: `registerSubdomains: Upsert error. Will retry. ${error.message}` });
+                            return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location })); // try again
+                        }
+
+                        retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, location) : null);
+                    });
+                });
+            }, function (error, result) {
+                if (error || result) return iteratorDone(error || result);
+
+                iteratorDone(null);
+            });
+        }, callback);
+    });
+}
+
+function unregisterLocations(locations, progressCallback, callback) {
+    assert(Array.isArray(locations));
+    assert.strictEqual(typeof progressCallback, 'function');
+    assert.strictEqual(typeof callback, 'function');
+
+    sysinfo.getServerIp(function (error, ip) {
+        if (error) return callback(error);
+
+        async.eachSeries(locations, function (location, iteratorDone) {
+            async.retry({ times: 30, interval: 5000 }, function (retryCallback) {
+                progressCallback({ message: `Unregistering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
+
+                removeDnsRecords(location.subdomain, location.domain, 'A', [ ip ], function (error) {
+                    if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, null);
+                    if (error && (error.reason === BoxError.SBUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
+                        progressCallback({ message: `Error unregistering location. Will retry. ${error.message}`});
+                        return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location })); // try again
+                    }
+
+                    retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location }) : null);
+                });
+            }, function (error, result) {
+                if (error || result) return iteratorDone(error || result);
+
+                iteratorDone();
+            });
+        }, callback);
+    });
+}
+
+function syncDnsRecords(options, progressCallback, callback) {
+    assert.strictEqual(typeof options, 'object');
+    assert.strictEqual(typeof progressCallback, 'function');
+    assert.strictEqual(typeof callback, 'function');
+
+    if (options.domain && options.type === 'mail') return mail.setDnsRecords(options.domain, callback);
+
+    getAll(function (error, domains) {
+        if (error) return callback(error);
+
+        if (options.domain) domains = domains.filter(d => d.domain === options.domain);
+
+        const mailSubdomain = settings.mailFqdn().substr(0, settings.mailFqdn().length - settings.mailDomain().length - 1);
+
+        apps.getAll(function (error, allApps) {
+            if (error) return callback(error);
+
+            let progress = 1, errors = [];
+
+            // we sync by domain only to get some nice progress
+            async.eachSeries(domains, function (domain, iteratorDone) {
+                progressCallback({ percent: progress, message: `Updating DNS of ${domain.domain}`});
+                progress += Math.round(100/(1+domains.length));
+
+                let locations = [];
+                if (domain.domain === settings.adminDomain()) locations.push({ subdomain: constants.ADMIN_LOCATION, domain: settings.adminDomain() });
+                if (domain.domain === settings.mailDomain() && settings.mailFqdn() !== settings.adminFqdn()) locations.push({ subdomain: mailSubdomain, domain: settings.mailDomain() });
+
+                allApps.forEach(function (app) {
+                    const appLocations = [{ subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains);
+                    locations = locations.concat(appLocations.filter(al => al.domain === domain.domain));
+                });
+
+                async.series([
+                    registerLocations.bind(null, locations, { overwriteDns: true }, progressCallback),
+                    progressCallback.bind(null, { message: `Updating mail DNS of ${domain.domain}`}),
+                    mail.setDnsRecords.bind(null, domain.domain)
+                ], function (error) {
+                    if (error) errors.push({ domain: domain.domain, message: error.message });
+                    iteratorDone();
+                });
+            }, () => callback(null, { errors }));
+        });
+    });
+}
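syncDnsRecords (the "Sync dns records" changelog item) rebuilds A records for the dashboard, the mail host, and every app location domain by domain, collecting per-domain errors instead of aborting. A hedged sketch of invoking it directly; the progress sink is illustrative:

    domains.syncDnsRecords({ /* domain: 'example.com' to limit scope */ }, function progress(p) {
        if (p.message) console.log(p.percent || '', p.message);
    }, function (error, result) {
        if (error) return console.error(error);
        // result.errors collects { domain, message } entries; the sync is best-effort
        result.errors.forEach(e => console.warn(`sync failed for ${e.domain}: ${e.message}`));
    });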
@@ -9,19 +9,19 @@ exports = module.exports = {
 'version': '48.18.0',

 'baseImages': [
-    { repo: 'cloudron/base', tag: 'cloudron/base:2.0.0@sha256:f9fea80513aa7c92fe2e7bf3978b54c8ac5222f47a9a32a7f8833edf0eb5a4f4' }
+    { repo: 'cloudron/base', tag: 'cloudron/base:3.0.0@sha256:455c70428723e3a823198c57472785437eb6eab082e79b3ff04ea584faf46e92' }
 ],

 // a major version bump in the db containers will trigger the restore logic that uses the db dumps
 // docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
 'images': {
-    'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.2.0@sha256:4359aae80050a92bae3be30600fb93ef4dbaec6dc9254bda353c0b131a36f969' },
-    'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:2.3.2@sha256:dd624870c7f8ba9b2759f93ce740d1e092a1ac4b2d6af5007a01b30ad6b316d0' },
-    'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:3.3.0@sha256:0daf1be5320c095077392bf21d247b93ceaddca46c866c17259a335c80d2f357' },
-    'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:3.0.0@sha256:59e50b1f55e433ffdf6d678f8c658812b4119f631db8325572a52ee40d3bc562' },
-    'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:2.3.0@sha256:0e31ec817e235b1814c04af97b1e7cf0053384aca2569570ce92bef0d95e94d2' },
-    'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.1.0@sha256:18e0d75ad88a3e66849de2c4c01f794e8df9235befd74544838e34b65f487740' },
-    'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.3.0@sha256:b7bc1ca4f4d0603a01369a689129aa273a938ce195fe43d00d42f4f2d5212f50' },
-    'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.0.0@sha256:7e0165f17789192fd4f92efb34aa373450fa859e3b502684b2b121a5582965bf' }
+    'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.3.0@sha256:386fb755fc41edd7086f7bcb230f7f28078936f9ae4ead6d97c741df1cc194ae' },
+    'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.0.4@sha256:4d688c746f27b195d98f35a7d24ec01f3f754e0ca61e9de0b0bc9793553880f1' },
+    'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.2@sha256:424081fd38ebd35f3606c64f8f99138570e5f4d5066f12cfb4142447d249d3e7' },
+    'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.1@sha256:ad20a9a5dcb2ab132374a7c8d44b89af0ec37651cf889e570f7625b02ee85fdf' },
+    'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.2@sha256:caaa1f7f4055ae8990d8ec65bd100567496df7e4ed5eb427867f3717a8dcbf92' },
+    'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.2.3@sha256:fdc4aa6d2c85aeafe65eaa4243aada0cc2e57b94f6eaee02c9b1a8fb89b01dd7' },
+    'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.4.0@sha256:953bbd8b72a9108a8526d2c0bdbba67e1e1563ff59d0a117f0884dba1576f3dd' },
+    'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.2.0@sha256:61e8247ded1e07cf882ca478dab180960357c614472e80b938f1f690a46788c2' }
 }
 };
@@ -740,6 +740,12 @@ function start(callback) {
     res.end();
 });

+// just log that an attempt was made to an unknown route, this helps a lot during app packaging
+gServer.use(function(req, res, next) {
+    debug('not handled: dn %s, scope %s, filter %s (from %s)', req.dn ? req.dn.toString() : '-', req.scope, req.filter ? req.filter.toString() : '-', req.connection.ldap.id);
+    return next();
+});
+
 gServer.listen(constants.LDAP_PORT, '0.0.0.0', callback);
 }
39 src/mail.js

@@ -875,37 +875,40 @@ function upsertDnsRecords(domain, mailFqdn, callback) {

 if (process.env.BOX_ENV === 'test') return callback();

-var dkimKey = readDkimPublicKeySync(domain);
+const dkimKey = readDkimPublicKeySync(domain);
 if (!dkimKey) return callback(new BoxError(BoxError.FS_ERROR, 'Failed to read dkim public key'));

 // t=s limits the domainkey to this domain and not its subdomains
-var dkimRecord = { subdomain: `${mailDomain.dkimSelector}._domainkey`, domain: domain, type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
+const dkimRecord = { subdomain: `${mailDomain.dkimSelector}._domainkey`, domain: domain, type: 'TXT', values: [ `"v=DKIM1; t=s; p=${dkimKey}"` ] };

-var records = [ ];
+let records = [];
 records.push(dkimRecord);
-if (mailDomain.enabled) {
-    records.push({ subdomain: '_dmarc', domain: domain, type: 'TXT', values: [ '"v=DMARC1; p=reject; pct=100"' ] });
-    records.push({ subdomain: '', domain: domain, type: 'MX', values: [ '10 ' + mailFqdn + '.' ] });
-}
+if (mailDomain.enabled) records.push({ subdomain: '', domain: domain, type: 'MX', values: [ '10 ' + mailFqdn + '.' ] });

 txtRecordsWithSpf(domain, mailFqdn, function (error, txtRecords) {
     if (error) return callback(error);

     if (txtRecords) records.push({ subdomain: '', domain: domain, type: 'TXT', values: txtRecords });

-    debug('upsertDnsRecords: will update %j', records);
+    domains.getDnsRecords('_dmarc', domain, 'TXT', function (error, dmarcRecords) { // only update dmarc if absent. this allows user to set email for reporting
+        if (error) return callback(error);

-    async.mapSeries(records, function (record, iteratorCallback) {
-        domains.upsertDnsRecords(record.subdomain, record.domain, record.type, record.values, iteratorCallback);
-    }, function (error, changeIds) {
-        if (error) {
-            debug(`upsertDnsRecords: failed to update: ${error}`);
-            return callback(error);
-        }
+        if (dmarcRecords.length === 0) records.push({ subdomain: '_dmarc', domain: domain, type: 'TXT', values: [ '"v=DMARC1; p=reject; pct=100"' ] });

-        debug('upsertDnsRecords: records %j added with changeIds %j', records, changeIds);
+        debug('upsertDnsRecords: will update %j', records);

-        callback(null);
+        async.mapSeries(records, function (record, iteratorCallback) {
+            domains.upsertDnsRecords(record.subdomain, record.domain, record.type, record.values, iteratorCallback);
+        }, function (error, changeIds) {
+            if (error) {
+                debug(`upsertDnsRecords: failed to update: ${error}`);
+                return callback(error);
+            }
+
+            debug('upsertDnsRecords: records %j added with changeIds %j', records, changeIds);
+
+            callback(null);
+        });
+    });
 });
 });
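This is the "Do not overwrite existing DMARC record" changelog item: the '_dmarc' TXT record is only pushed when the zone has none, so a user-supplied DMARC (say, one with an rua reporting address) survives the sync. The guard in isolation:

    // dmarcRecords comes from a TXT lookup on '_dmarc' for this domain
    if (dmarcRecords.length === 0) {
        records.push({ subdomain: '_dmarc', domain, type: 'TXT', values: [ '"v=DMARC1; p=reject; pct=100"' ] });
    }
    // an existing record such as '"v=DMARC1; p=reject; rua=mailto:dmarc@example.com"' is left untouched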
@@ -1421,7 +1424,7 @@ function resolveList(listName, listDomain, callback) {

 let result = [], toResolve = list.members.slice(), visited = []; // slice creates a copy of array

-async.whilst(() => toResolve.length != 0, function (iteratorCallback) {
+async.whilst((testDone) => testDone(null, toResolve.length != 0), function (iteratorCallback) {
     const toProcess = toResolve.shift();
     const parts = toProcess.split('@');
     const memberName = parts[0].split('+')[0], memberDomain = parts[1];

@@ -262,8 +262,8 @@ function listAllMailboxes(page, perPage, callback) {
 assert.strictEqual(typeof callback, 'function');

 const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains '
-    + ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1` +
-    + ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2` +
+    + ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1`
+    + ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2`
     + ' ON m1.name=m2.aliasName AND m1.domain=m2.aliasDomain AND m1.ownerId=m2.ownerId'
     + ' GROUP BY m1.name, m1.domain, m1.ownerId'
     + ' ORDER BY name LIMIT ?,?';
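The listAllMailboxes fix deserves a second look: the old concatenation had both a trailing + and a leading + on consecutive lines, so the second + was parsed as a unary plus on the template string, turning it into NaN inside the SQL text. A minimal reproduction:

    const a = 'SELECT 1 ' +
        + ' FROM t';   // unary plus: +' FROM t' evaluates to NaN
    console.log(a);    // 'SELECT 1 NaN' — broken SQL
    const b = 'SELECT 1 '
        + ' FROM t';   // fixed: a single binary +
    console.log(b);    // 'SELECT 1  FROM t'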
@@ -95,7 +95,7 @@ server {
 proxy_hide_header X-Content-Type-Options;
 add_header X-Permitted-Cross-Domain-Policies "none";
 proxy_hide_header X-Permitted-Cross-Domain-Policies;
-add_header Referrer-Policy "no-referrer-when-downgrade";
+add_header Referrer-Policy "same-origin";
 proxy_hide_header Referrer-Policy;

 # workaround caching issue after /logout. if max-age is set, browser uses cache and user thinks they have not logged out

@@ -108,7 +108,7 @@ server {

 # gzip responses that are > 50k and not images
 gzip on;
-gzip_min_length 50k;
+gzip_min_length 18k;
 gzip_types text/css text/javascript text/xml text/plain application/javascript application/x-javascript application/json;

 # enable for proxied requests as well

@@ -258,7 +258,7 @@ server {
 }

 location @proxy-auth-login {
-    if ($http_user_agent ~* "docker-client") {
+    if ($http_user_agent ~* "docker") {
         return 401;
     }
     return 302 /login?redirect=$request_uri;

@@ -292,9 +292,11 @@ server {
 <% }); %>

 <% } else if ( endpoint === 'redirect' ) { %>
 location / {
+    # redirect everything to the app. this is temporary because there is no way
+    # to clear a permanent redirect on the browser
     return 302 https://<%= redirectTo %>$request_uri;
 }
 <% } else if ( endpoint === 'ip' ) { %>
 location /notfound.html {
     root <%= sourceDir %>/dashboard/dist;
@@ -23,7 +23,8 @@ const apps = require('./apps.js'),
     tasks = require('./tasks.js'),
     _ = require('underscore');

-function start(callback) {
+function start(options, callback) {
+    assert.strictEqual(typeof options, 'object');
     assert.strictEqual(typeof callback, 'function');

     if (process.env.BOX_ENV === 'test' && !process.env.TEST_CREATE_INFRA) return callback();

@@ -52,7 +53,7 @@ function start(callback) {

 async.series([
     (next) => { if (existingInfra.version !== infra.version) removeAllContainers(next); else next(); },
-    markApps.bind(null, existingInfra), // mark app state before we start addons. this gives the db import logic a chance to mark an app as errored
+    markApps.bind(null, existingInfra, options), // mark app state before we start addons. this gives the db import logic a chance to mark an app as errored
     services.startServices.bind(null, existingInfra),
     fs.writeFile.bind(fs, paths.INFRA_VERSION_FILE, JSON.stringify(infra, null, 4))
 ], function (error) {

@@ -118,10 +119,14 @@ function removeAllContainers(callback) {
     ], callback);
 }

-function markApps(existingInfra, callback) {
+function markApps(existingInfra, options, callback) {
     assert.strictEqual(typeof existingInfra, 'object');
+    assert.strictEqual(typeof options, 'object');
     assert.strictEqual(typeof callback, 'function');

     if (existingInfra.version === 'none') { // cloudron is being restored from backup
         debug('markApps: restoring installed apps');
-        apps.restoreInstalledApps(callback);
+        apps.restoreInstalledApps(options, callback);
     } else if (existingInfra.version !== infra.version) {
         debug('markApps: reconfiguring installed apps');
         reverseProxy.removeAppConfigs(); // should we change the cert location, nginx will not start
@@ -149,16 +149,17 @@ function activate(username, password, email, displayName, ip, auditSource, callb
                expires: result.expires
            });

            setImmediate(cloudron.onActivated.bind(null, NOOP_CALLBACK)); // hack for now to not block the above http response
            setImmediate(cloudron.onActivated.bind(null, {}, NOOP_CALLBACK)); // hack for now to not block the above http response
        });
    });
}

function restore(backupConfig, backupId, version, sysinfoConfig, auditSource, callback) {
function restore(backupConfig, backupId, version, sysinfoConfig, options, auditSource, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof version, 'string');
    assert.strictEqual(typeof sysinfoConfig, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');
    assert.strictEqual(typeof callback, 'function');

@@ -203,7 +204,10 @@ function restore(backupConfig, backupId, version, sysinfoConfig, auditSource, ca
        (done) => {
            const adminDomain = settings.adminDomain(); // load this fresh from after the backup.restore
            async.series([
                cloudron.setupDnsAndCert.bind(null, constants.ADMIN_LOCATION, adminDomain, auditSource, (progress) => setProgress('restore', progress.message, NOOP_CALLBACK)),
                (next) => {
                    if (options.skipDnsSetup) return next();
                    cloudron.setupDnsAndCert(constants.ADMIN_LOCATION, adminDomain, auditSource, (progress) => setProgress('restore', progress.message, NOOP_CALLBACK), next);
                },
                cloudron.setDashboardDomain.bind(null, adminDomain, auditSource)
            ], done);
        },

@@ -213,7 +217,7 @@ function restore(backupConfig, backupId, version, sysinfoConfig, auditSource, ca
        gProvisionStatus.restore.active = false;
        gProvisionStatus.restore.errorMessage = error ? error.message : '';

        if (!error) cloudron.onActivated(NOOP_CALLBACK);
        if (!error) cloudron.onActivated(options, NOOP_CALLBACK);
    });
    });
});
@@ -56,11 +56,17 @@ function basicAuthVerify(req, res, next) {

    const api = credentials.name.indexOf('@') !== -1 ? users.verifyWithEmail : users.verifyWithUsername;

    api(credentials.name, credentials.pass, appId, function (error, user) {
        if (error) return next(new HttpError(403, 'Invalid username or password' ));
    apps.get(appId, function (error, app) {
        if (error) return next(new HttpError(503, error.message));

        req.user = user;
        next();
        if (!app.manifest.addons.proxyAuth.basicAuth) return next();

        api(credentials.name, credentials.pass, appId, function (error, user) {
            if (error) return next(new HttpError(403, 'Invalid username or password' ));

            req.user = user;
            next();
        });
    });
}

@@ -104,7 +110,8 @@ function isBrowser(req) {
    const userAgent = req.get('user-agent');
    if (!userAgent) return false;

    return !userAgent.toLowerCase().includes('docker-client');
    // https://github.com/docker/engine/blob/master/dockerversion/useragent.go#L18
    return !userAgent.toLowerCase().includes('docker');
}

// called by nginx to authorize any protected route. this route must return only 2xx or 401/403 (http://nginx.org/en/docs/http/ngx_http_auth_request_module.html)
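
A quick check of the relaxed user-agent match; the sample string follows the docker CLI format from the linked useragent.go (exact fields vary by version, so treat it as illustrative):

    // sketch: the old check missed UAs like this, which say "docker/...", not "docker-client"
    const ua = 'docker/20.10.3 go/go1.13.15 kernel/5.4.0-65-generic os/linux arch/amd64';
    console.log(ua.toLowerCase().includes('docker-client')); // false — previously treated as a browser
    console.log(ua.toLowerCase().includes('docker'));        // true  — now detected, gets a 401 instead of a login redirect
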
@@ -678,7 +678,13 @@ function renewCerts(options, auditSource, progressCallback, callback) {

        async.series([
            (next) => { return renewed.includes(settings.mailFqdn()) ? mail.handleCertChanged(next) : next(); }, // mail cert renewed
            reload // reload nginx if any certs were updated but the config was not rewritten
            reload, // reload nginx if any certs were updated but the config was not rewritten
            (next) => { // restart tls apps on cert change
                const tlsApps = allApps.filter(app => app.manifest.addons && app.manifest.addons.tls && renewed.includes(app.fqdn));
                async.eachSeries(tlsApps, function (app, iteratorDone) {
                    apps.restart(app, auditSource, () => iteratorDone());
                }, next);
            }
        ], callback);
    });
});
@@ -151,6 +151,7 @@ function install(req, res, next) {
    }

    if ('overwriteDns' in req.body && typeof req.body.overwriteDns !== 'boolean') return next(new HttpError(400, 'overwriteDns must be boolean'));
    if ('skipDnsSetup' in req.body && typeof req.body.skipDnsSetup !== 'boolean') return next(new HttpError(400, 'skipDnsSetup must be boolean'));

    apps.downloadManifest(data.appStoreId, data.manifest, function (error, appStoreId, manifest) {
        if (error) return next(BoxError.toHttpError(error));

@@ -365,6 +366,7 @@ function setLocation(req, res, next) {
    }

    if ('overwriteDns' in req.body && typeof req.body.overwriteDns !== 'boolean') return next(new HttpError(400, 'overwriteDns must be boolean'));
    if ('skipDnsSetup' in req.body && typeof req.body.skipDnsSetup !== 'boolean') return next(new HttpError(400, 'skipDnsSetup must be boolean'));

    apps.setLocation(req.resource, req.body, auditSource.fromRequest(req), function (error, result) {
        if (error) return next(BoxError.toHttpError(error));

@@ -478,6 +480,7 @@ function clone(req, res, next) {
    if (('portBindings' in data) && typeof data.portBindings !== 'object') return next(new HttpError(400, 'portBindings must be an object'));

    if ('overwriteDns' in req.body && typeof req.body.overwriteDns !== 'boolean') return next(new HttpError(400, 'overwriteDns must be boolean'));
    if ('skipDnsSetup' in req.body && typeof req.body.skipDnsSetup !== 'boolean') return next(new HttpError(400, 'skipDnsSetup must be boolean'));

    apps.clone(req.resource, data, req.user, auditSource.fromRequest(req), function (error, result) {
        if (error) return next(BoxError.toHttpError(error));
@@ -21,7 +21,8 @@ exports = module.exports = {
    renewCerts,
    getServerIp,
    getLanguages,
    syncExternalLdap
    syncExternalLdap,
    syncDnsRecords
};

let assert = require('assert'),

@@ -280,6 +281,8 @@ function prepareDashboardDomain(req, res, next) {
}

function renewCerts(req, res, next) {
    if ('domain' in req.body && typeof req.body.domain !== 'string') return next(new HttpError(400, 'domain must be a string'));

    cloudron.renewCerts({ domain: req.body.domain || null }, auditSource.fromRequest(req), function (error, taskId) {
        if (error) return next(BoxError.toHttpError(error));

@@ -291,7 +294,7 @@ function syncExternalLdap(req, res, next) {
    externalLdap.startSyncer(function (error, taskId) {
        if (error) return next(new HttpError(500, error.message));

        next(new HttpSuccess(202, { taskId: taskId }));
        next(new HttpSuccess(202, { taskId }));
    });
}

@@ -310,3 +313,17 @@ function getLanguages(req, res, next) {
        next(new HttpSuccess(200, { languages }));
    });
}

function syncDnsRecords(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if ('domain' in req.body && typeof req.body.domain !== 'string') return next(new HttpError(400, 'domain must be a string'));
    if ('type' in req.body && typeof req.body.type !== 'string') return next(new HttpError(400, 'type must be a string'));

    cloudron.syncDnsRecords(req.body, function (error, taskId) {
        if (error && error.reason === BoxError.ACCESS_DENIED) return next(new HttpSuccess(200, { error: { reason: error.reason, message: error.message }}));
        if (error) return next(BoxError.toHttpError(error));

        next(new HttpSuccess(201, { taskId }));
    });
}
@@ -1,13 +1,13 @@
'use strict';

exports = module.exports = {
    add: add,
    get: get,
    getAll: getAll,
    update: update,
    del: del,
    add,
    get,
    getAll,
    update,
    del,

    checkDnsRecords: checkDnsRecords,
    checkDnsRecords,
};

var assert = require('assert'),
@@ -3,8 +3,6 @@
exports = module.exports = {
    getDomain,

    setDnsRecords,

    getStatus,

    setMailFromValidation,

@@ -50,21 +48,6 @@ function getDomain(req, res, next) {
    });
}

function setDnsRecords(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');
    assert.strictEqual(typeof req.params.domain, 'string');

    // can take a setup all the DNS entries. this is mostly because some backends try to list DNS entries (DO)
    // for upsert and this takes a lot of time
    req.clearTimeout();

    mail.setDnsRecords(req.params.domain, function (error) {
        if (error) return next(BoxError.toHttpError(error));

        next(new HttpSuccess(201));
    });
}

function getStatus(req, res, next) {
    assert.strictEqual(typeof req.params.domain, 'string');
@@ -111,7 +111,7 @@ function restore(req, res, next) {

    if (!req.body.backupConfig || typeof req.body.backupConfig !== 'object') return next(new HttpError(400, 'backupConfig is required'));

    var backupConfig = req.body.backupConfig;
    const backupConfig = req.body.backupConfig;
    if (typeof backupConfig.provider !== 'string') return next(new HttpError(400, 'provider is required'));
    if ('password' in backupConfig && typeof backupConfig.password !== 'string') return next(new HttpError(400, 'password must be a string'));
    if (typeof backupConfig.format !== 'string') return next(new HttpError(400, 'format must be a string'));

@@ -121,8 +121,13 @@ function restore(req, res, next) {
    if (typeof req.body.version !== 'string') return next(new HttpError(400, 'version must be a string'));

    if ('sysinfoConfig' in req.body && typeof req.body.sysinfoConfig !== 'object') return next(new HttpError(400, 'sysinfoConfig must be an object'));
    if ('skipDnsSetup' in req.body && typeof req.body.skipDnsSetup !== 'boolean') return next(new HttpError(400, 'skipDnsSetup must be a boolean'));

    provision.restore(backupConfig, req.body.backupId, req.body.version, req.body.sysinfoConfig || { provider: 'generic' }, auditSource.fromRequest(req), function (error) {
    const options = {
        skipDnsSetup: req.body.skipDnsSetup || false
    };

    provision.restore(backupConfig, req.body.backupId, req.body.version, req.body.sysinfoConfig || { provider: 'generic' }, options, auditSource.fromRequest(req), function (error) {
        if (error) return next(BoxError.toHttpError(error));

        next(new HttpSuccess(200, {}));
@@ -27,6 +27,8 @@ function getAll(req, res, next) {
function get(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    req.clearTimeout();

    services.getServiceStatus(req.params.service, function (error, result) {
        if (error) return next(BoxError.toHttpError(error));
@@ -194,10 +194,13 @@ function getRegistryConfig(req, res, next) {
function setRegistryConfig(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (typeof req.body.serverAddress !== 'string') return next(new HttpError(400, 'serverAddress is required'));
    if ('username' in req.body && typeof req.body.username !== 'string') return next(new HttpError(400, 'username is required'));
    if ('email' in req.body && typeof req.body.email !== 'string') return next(new HttpError(400, 'email is required'));
    if ('password' in req.body && typeof req.body.password !== 'string') return next(new HttpError(400, 'password is required'));
    if (!req.body.provider || typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
    if (req.body.provider !== 'noop') {
        if (typeof req.body.serverAddress !== 'string') return next(new HttpError(400, 'serverAddress is required'));
        if ('username' in req.body && typeof req.body.username !== 'string') return next(new HttpError(400, 'username is required'));
        if ('email' in req.body && typeof req.body.email !== 'string') return next(new HttpError(400, 'email is required'));
        if ('password' in req.body && typeof req.body.password !== 'string') return next(new HttpError(400, 'password is required'));
    }

    settings.setRegistryConfig(req.body, function (error) {
        if (error) return next(BoxError.toHttpError(error));
@@ -4,18 +4,16 @@ exports = module.exports = {
    get
};

const domains = require('../domains.js'),
    HttpError = require('connect-lastmile').HttpError;
const HttpError = require('connect-lastmile').HttpError,
    wellknown = require('../wellknown.js');

function get(req, res, next) {
    const host = req.headers['host'];
    const location = req.params[0];

    domains.get(host, function (error, domain) {
    wellknown.get(host, location, function (error, result) {
        if (error) return next(new HttpError(404, error.message));

        const location = req.params[0];
        if (!domain.wellKnown || !(location in domain.wellKnown)) return next(new HttpError(404, 'No custom well-known config'));

        res.status(200).send(domain.wellKnown[location]);
        res.status(200).set('content-type', result.type).send(result.body);
    });
}
@@ -1,28 +0,0 @@
#!/bin/bash

set -eu -o pipefail

if [[ ${EUID} -ne 0 ]]; then
    echo "This script should be run as root." > /dev/stderr
    exit 1
fi

readonly BOX_SRC_DIR=/home/yellowtent/box

if [[ $# == 1 && "$1" == "--check" ]]; then
    echo "OK"
    exit 0
fi

echo "Retiring cloudron"

if [[ "${BOX_ENV}" != "cloudron" ]]; then
    exit 0
fi

echo "Stopping apps"
systemctl stop docker # stop the apps

# do this at the end since stopping the box will kill this script as well
echo "Stopping Cloudron Smartserver"
"${BOX_SRC_DIR}/setup/stop.sh"
@@ -109,6 +109,7 @@ function initializeExpressSync() {
    router.post('/api/v1/cloudron/prepare_dashboard_domain', json, token, authorizeAdmin, routes.cloudron.prepareDashboardDomain);
    router.post('/api/v1/cloudron/set_dashboard_domain', json, token, authorizeAdmin, routes.cloudron.updateDashboardDomain);
    router.post('/api/v1/cloudron/renew_certs', json, token, authorizeAdmin, routes.cloudron.renewCerts);
    router.post('/api/v1/cloudron/sync_dns', json, token, authorizeAdmin, routes.cloudron.syncDnsRecords);
    router.post('/api/v1/cloudron/check_for_updates', json, token, authorizeAdmin, routes.cloudron.checkForUpdates);
    router.get ('/api/v1/cloudron/reboot', token, authorizeAdmin, routes.cloudron.isRebootRequired);
    router.post('/api/v1/cloudron/reboot', json, token, authorizeAdmin, routes.cloudron.reboot);

@@ -275,7 +276,6 @@ function initializeExpressSync() {
    router.post('/api/v1/mail/:domain/catch_all', json, token, authorizeAdmin, routes.mail.setCatchAllAddress);
    router.post('/api/v1/mail/:domain/relay', json, token, authorizeAdmin, routes.mail.setMailRelay);
    router.post('/api/v1/mail/:domain/enable', json, token, authorizeAdmin, routes.mail.setMailEnabled);
    router.post('/api/v1/mail/:domain/dns', json, token, authorizeAdmin, routes.mail.setDnsRecords);
    router.post('/api/v1/mail/:domain/banner', json, token, authorizeAdmin, routes.mail.setBanner);
    router.post('/api/v1/mail/:domain/send_test_mail', json, token, authorizeAdmin, routes.mail.sendTestMail);
    router.get ('/api/v1/mail/:domain/mailbox_count', token, authorizeAdmin, routes.mail.getMailboxCount);
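
A minimal sketch of calling the new sync_dns endpoint from Node; the hostname and token are placeholders, and using the access_token query parameter for auth is an assumption:

    'use strict';

    const https = require('https');

    // both body fields are optional strings, per the syncDnsRecords validation above
    const body = JSON.stringify({ domain: 'example.com', type: 'A' });

    const req = https.request({
        hostname: 'my.example.com', // placeholder dashboard host
        path: '/api/v1/cloudron/sync_dns?access_token=TOKEN',
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(body) }
    }, function (res) {
        let data = '';
        res.on('data', (chunk) => data += chunk);
        res.on('end', () => console.log(res.statusCode, data)); // 201 with { taskId } on success
    });

    req.end(body);
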
@@ -22,7 +22,6 @@ exports = module.exports = {
    clearAddons,

    getEnvironment,
    getMountsSync,
    getContainerNamesSync,

    getContainerDetails,

@@ -39,7 +38,7 @@ var appdb = require('./appdb.js'),
    BoxError = require('./boxerror.js'),
    constants = require('./constants.js'),
    crypto = require('crypto'),
    debug = require('debug')('box:addons'),
    debug = require('debug')('box:services'),
    docker = require('./docker.js'),
    fs = require('fs'),
    graphite = require('./graphite.js'),

@@ -160,6 +159,13 @@ var ADDONS = {
        restore: NOOP,
        clear: NOOP,
    },
    tls: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP,
    },
    oauth: { // kept for backward compatibility. keep teardown for uninstall to work
        setup: NOOP,
        teardown: teardownOauth,

@@ -295,7 +301,7 @@ function containerStatus(containerName, tokenEnvName, callback) {
        if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 3000 }, function (error, response) {
        request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 20000 }, function (error, response) {
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${error.message}` });
            if (response.statusCode !== 200 || !response.body.status) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}` });

@@ -534,7 +540,7 @@ function rebuildService(id, callback) {
    if (id === 'mongodb') return startMongodb({ version: 'none' }, callback);
    if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
    if (id === 'mysql') return startMysql({ version: 'none' }, callback);
    if (id === 'sftp') return sftp.start({ version: 'none' }, serviceConfig, callback);
    if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
    if (id === 'graphite') return graphite.start({ version: 'none' }, serviceConfig, callback);

    // nothing to rebuild for now.

@@ -608,7 +614,7 @@ function waitForContainer(containerName, tokenEnvName, callback) {
        if (error) return callback(error);

        async.retry({ times: 10, interval: 15000 }, function (retryCallback) {
            request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 3000 }, function (error, response) {
            request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 5000 }, function (error, response) {
                if (error) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${error.message}`));
                if (response.statusCode !== 200 || !response.body.status) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}`));

@@ -763,10 +769,10 @@ function exportDatabase(addon, callback) {
        return callback(null);
    }

    appdb.getAll(function (error, apps) {
    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        async.eachSeries(apps, function iterator (app, iteratorCallback) {
        async.eachSeries(allApps, function iterator (app, iteratorCallback) {
            if (!app.manifest.addons || !(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon

            debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);

@@ -774,7 +780,8 @@ function exportDatabase(addon, callback) {
            ADDONS[addon].backup(app, app.manifest.addons[addon], function (error) {
                if (error) {
                    debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
                    return iteratorCallback(error);
                    // for errored apps, we can ignore if export had an error
                    return iteratorCallback(app.installationState === apps.ISTATE_ERROR ? null : error);
                }

                iteratorCallback();

@@ -831,6 +838,7 @@ function startServices(existingInfra, callback) {
    if (existingInfra.version !== infra.version) {
        debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
        startFuncs.push(
            mail.startMail, // start this first to reduce email downtime
            startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}),
            startMysql.bind(null, existingInfra),
            startPostgresql.bind(null, existingInfra),

@@ -838,15 +846,15 @@ function startServices(existingInfra, callback) {
            startRedis.bind(null, existingInfra),
            graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}),
            sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
            mail.startMail);
        );
    } else {
        assert.strictEqual(typeof existingInfra.images, 'object');

        if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); // start this first to reduce email downtime
        if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}));
        if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
        if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
        if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
        if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail);
        if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
        if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}));
        if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));

@@ -884,31 +892,6 @@ function getEnvironment(app, callback) {
    });
}

function getMountsSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    let mounts = [ ];

    if (!addons) return mounts;

    for (let addon in addons) {
        switch (addon) {
        case 'localstorage':
            mounts.push({
                Target: '/app/data',
                Source: `${app.id}-localstorage`,
                Type: 'volume',
                ReadOnly: false
            });
            break;
        default: break;
        }
    }

    return mounts;
}

function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

@@ -2101,7 +2084,7 @@ function statusGraphite(callback) {
        if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 3000 }, function (error, response) {
        request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 20000 }, function (error, response) {
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
            if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });
@@ -159,7 +159,9 @@ let gDefaults = (function () {
        provider: 'noop',
        autoCreate: false
    };
    result[exports.REGISTRY_CONFIG_KEY] = {};
    result[exports.REGISTRY_CONFIG_KEY] = {
        provider: 'noop'
    };
    result[exports.SYSINFO_CONFIG_KEY] = {
        provider: 'generic'
    };
src/sftp.js

@@ -2,6 +2,7 @@

exports = module.exports = {
    start,
    rebuild,

    DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024
};

@@ -21,13 +22,14 @@ var apps = require('./apps.js'),
    _ = require('underscore');

var gRebuildInProgress = false;
function rebuild(serviceConfig, callback) {
function rebuild(serviceConfig, options, callback) {
    assert.strictEqual(typeof serviceConfig, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    if (gRebuildInProgress) {
        debug('waiting for other rebuild to finish');
        return setTimeout(function () { rebuild(serviceConfig, callback); }, 5000);
        return setTimeout(function () { rebuild(serviceConfig, options, callback); }, 5000);
    }

    gRebuildInProgress = true;

@@ -39,6 +41,7 @@ function rebuild(serviceConfig, callback) {

    debug('rebuilding container');

    const force = !!options.force;
    const tag = infra.images.sftp.tag;
    const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
    const memory = system.getMemoryAllocation(memoryLimit);

@@ -84,7 +87,7 @@ function rebuild(serviceConfig, callback) {
        currentDataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
        dataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });

        if (_.isEqual(currentDataDirs, dataDirs)) {
        if (!force && _.isEqual(currentDataDirs, dataDirs)) {
            debug('Skipping rebuild, no changes');
            return done();
        }

@@ -127,5 +130,5 @@ function start(existingInfra, serviceConfig, callback) {
    assert.strictEqual(typeof serviceConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    rebuild(serviceConfig, callback);
    rebuild(serviceConfig, { force: true }, callback); // force rebuild when infra changed
}
@@ -1,22 +1,23 @@
'use strict';

exports = module.exports = {
    getBackupPath: getBackupPath,
    checkPreconditions: checkPreconditions,
    getBackupPath,
    checkPreconditions,

    upload: upload,
    download: download,
    upload,
    download,

    copy: copy,
    copy,

    listDir: listDir,
    exists,
    listDir,

    remove: remove,
    removeDir: removeDir,
    remove,
    removeDir,

    testConfig: testConfig,
    removePrivateFields: removePrivateFields,
    injectPrivateFields: injectPrivateFields
    testConfig,
    removePrivateFields,
    injectPrivateFields
};

const PROVIDER_FILESYSTEM = 'filesystem';

@@ -136,6 +137,20 @@ function download(apiConfig, sourceFilePath, callback) {
    callback(null, fileStream);
}

function exists(apiConfig, sourceFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof sourceFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    // do not use existsSync because it does not return EPERM etc
    if (!safe.fs.statSync(sourceFilePath)) {
        if (safe.error && safe.error.code === 'ENOENT') return callback(null, false);
        if (safe.error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Exists ${sourceFilePath}: ${safe.error.message}`));
    }

    callback(null, true);
}

function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof dir, 'string');
@@ -1,21 +1,22 @@
'use strict';

exports = module.exports = {
    getBackupPath: getBackupPath,
    checkPreconditions: checkPreconditions,
    getBackupPath,
    checkPreconditions,

    upload: upload,
    download: download,
    copy: copy,
    upload,
    exists,
    download,
    copy,

    listDir: listDir,
    listDir,

    remove: remove,
    removeDir: removeDir,
    remove,
    removeDir,

    testConfig: testConfig,
    removePrivateFields: removePrivateFields,
    injectPrivateFields: injectPrivateFields,
    testConfig,
    removePrivateFields,
    injectPrivateFields,

    // Used to mock GCS
    _mockInject: mockInject,

@@ -100,6 +101,36 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
    sourceStream.pipe(uploadStream);
}

function exists(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    const bucket = getBucket(apiConfig);

    if (!backupFilePath.endsWith('/')) {
        const file = bucket.file(backupFilePath);
        file.getMetadata(function (error) {
            if (error && error.code === 404) return callback(null, false);
            if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));

            callback(null, true);
        });
    } else {
        const query = {
            prefix: backupFilePath,
            maxResults: 1,
            autoPaginate: true
        };

        bucket.getFiles(query, function (error, files) {
            if (error) return callback(error);

            callback(null, files.length !== 0);
        });
    }
}

function download(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');

@@ -135,7 +166,7 @@ function listDir(apiConfig, backupFilePath, batchSize, iteratorCallback, callbac

    let done = false;

    async.whilst(() => !done, function listAndDownload(whilstCallback) {
    async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
        bucket.getFiles(query, function (error, files, nextQuery) {
            if (error) return whilstCallback(error);

@@ -212,7 +243,7 @@ function removeDir(apiConfig, pathPrefix) {

    var events = new EventEmitter();

    const batchSize = 1000, concurrency = 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles
    const batchSize = 1000, concurrency = apiConfig.deleteConcurrency || 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles
    var total = 0;

    listDir(apiConfig, pathPrefix, batchSize, function (entries, done) {
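
The whilst changes above and in the s3 backend track async 3.x, where the test function reports its result through a callback instead of returning a boolean; a standalone sketch:

    'use strict';

    const async = require('async'); // async >= 3.x

    let count = 0;

    async.whilst(
        (testDone) => testDone(null, count < 3),         // async 3.x style: pass the test result to a callback
        (next) => { count += 1; setTimeout(next, 10); }, // iteratee runs while the test passes
        (error) => console.log('done after', count, 'iterations', error || '')
    );
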
@@ -11,23 +11,25 @@
// for the other API calls we leave it to the backend to retry. this allows
// them to tune the concurrency based on failures/rate limits accordingly
exports = module.exports = {
    getBackupPath: getBackupPath,
    checkPreconditions: checkPreconditions,
    getBackupPath,
    checkPreconditions,

    upload: upload,
    upload,

    download: download,
    downloadDir: downloadDir,
    copy: copy,
    exists,

    listDir: listDir,
    download,
    downloadDir,
    copy,

    remove: remove,
    removeDir: removeDir,
    listDir,

    testConfig: testConfig,
    removePrivateFields: removePrivateFields,
    injectPrivateFields: injectPrivateFields
    remove,
    removeDir,

    testConfig,
    removePrivateFields,
    injectPrivateFields
};

var assert = require('assert'),

@@ -72,6 +74,14 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
    callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'upload is not implemented'));
}

function exists(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'exists is not implemented'));
}

function download(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
@@ -1,22 +1,23 @@
'use strict';

exports = module.exports = {
    getBackupPath: getBackupPath,
    checkPreconditions: checkPreconditions,
    getBackupPath,
    checkPreconditions,

    upload: upload,
    download: download,
    downloadDir: downloadDir,
    copy: copy,
    upload,
    exists,
    download,
    downloadDir,
    copy,

    listDir: listDir,
    listDir,

    remove: remove,
    removeDir: removeDir,
    remove,
    removeDir,

    testConfig: testConfig,
    removePrivateFields: removePrivateFields,
    injectPrivateFields: injectPrivateFields
    testConfig,
    removePrivateFields,
    injectPrivateFields
};

var assert = require('assert'),

@@ -49,6 +50,16 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
    callback(null);
}

function exists(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('exists: %s', backupFilePath);

    callback(null, false);
}

function download(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
@@ -1,21 +1,22 @@
'use strict';

exports = module.exports = {
    getBackupPath: getBackupPath,
    checkPreconditions: checkPreconditions,
    getBackupPath,
    checkPreconditions,

    upload: upload,
    download: download,
    copy: copy,
    upload,
    exists,
    download,
    copy,

    listDir: listDir,
    listDir,

    remove: remove,
    removeDir: removeDir,
    remove,
    removeDir,

    testConfig: testConfig,
    removePrivateFields: removePrivateFields,
    injectPrivateFields: injectPrivateFields,
    testConfig,
    removePrivateFields,
    injectPrivateFields,

    // Used to mock AWS
    _mockInject: mockInject,

@@ -56,7 +57,7 @@ function getS3Config(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    var credentials = {
    let credentials = {
        signatureVersion: apiConfig.signatureVersion || 'v4',
        s3ForcePathStyle: false, // Use vhost style instead of path style - https://forums.aws.amazon.com/ann.jspa?annID=6776
        accessKeyId: apiConfig.accessKeyId,

@@ -64,7 +65,7 @@ function getS3Config(apiConfig, callback) {
        region: apiConfig.region || 'us-east-1',
        maxRetries: 10,
        retryDelayOptions: {
            customBackoff: () => 20000 // constant backoff - https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Config.html#retryDelayOptions-property
            customBackoff: (/* retryCount, error */) => 20000 // constant backoff - https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Config.html#retryDelayOptions-property
        },
        httpOptions: {
            connectTimeout: 60000, // https://github.com/aws/aws-sdk-js/pull/1446

@@ -137,6 +138,45 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
    });
}

function exists(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    getS3Config(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        const s3 = new AWS.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));

        if (!backupFilePath.endsWith('/')) { // check for file
            const params = {
                Bucket: apiConfig.bucket,
                Key: backupFilePath
            };

            s3.headObject(params, function (error) {
                if (!Object.keys(this.httpResponse.headers).some(h => h.startsWith('x-amz'))) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint'));
                if (error && S3_NOT_FOUND(error)) return callback(null, false);
                if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));

                callback(null, true);
            });
        } else { // list dir contents
            const listParams = {
                Bucket: apiConfig.bucket,
                Prefix: backupFilePath,
                MaxKeys: 1
            };

            s3.listObjects(listParams, function (error, listData) {
                if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));

                callback(null, listData.Contents.length !== 0);
            });
        }
    });
}

function download(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');

@@ -189,7 +229,7 @@ function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {

    let done = false;

    async.whilst(() => !done, function listAndDownload(whilstCallback) {
    async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
        s3.listObjects(listParams, function (error, listData) {
            if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
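
A hypothetical caller of the new exists(); the module path and apiConfig values here are assumptions, with field names following getS3Config above:

    'use strict';

    const s3 = require('./storage/s3.js'); // assumed module path

    const apiConfig = { bucket: 'my-backups', accessKeyId: 'KEY', secretAccessKey: 'SECRET', region: 'us-east-1' };

    // a plain key checks a single object via headObject; a trailing '/' lists the prefix with MaxKeys=1 instead
    s3.exists(apiConfig, 'snapshot/box.tar.gz', function (error, found) {
        if (error) return console.error(error.message);
        console.log(found ? 'backup present' : 'backup missing');
    });
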
@@ -23,7 +23,8 @@ const apps = require('./apps.js'),

const dfAsync = async.asyncify(df), dfFileAsync = async.asyncify(df.file);

function getVolumeDisks(callback) {
function getVolumeDisks(appsDataDisk, callback) {
    assert.strictEqual(typeof appsDataDisk, 'string');
    assert.strictEqual(typeof callback, 'function');

    let volumeDisks = {};

@@ -33,7 +34,7 @@ function getVolumeDisks(callback) {

    async.eachSeries(allVolumes, function (volume, iteratorDone) {
        dfFileAsync(volume.hostPath, function (error, result) {
            volumeDisks[volume.id] = error ? volumeDisks.appsDataDisk : result.filesystem; // ignore any errors
            volumeDisks[volume.id] = error ? appsDataDisk : result.filesystem; // ignore any errors

            iteratorDone();
        });

@@ -43,7 +44,8 @@ function getVolumeDisks(callback) {
    });
}

function getAppDisks(callback) {
function getAppDisks(appsDataDisk, callback) {
    assert.strictEqual(typeof appsDataDisk, 'string');
    assert.strictEqual(typeof callback, 'function');

    let appDisks = {};

@@ -53,12 +55,12 @@ function getAppDisks(callback) {

    async.eachSeries(allApps, function (app, iteratorDone) {
        if (!app.dataDir) {
            appDisks[app.id] = appDisks.appsDataDisk;
            appDisks[app.id] = appsDataDisk;
            return iteratorDone();
        }

        dfFileAsync(app.dataDir, function (error, result) {
            appDisks[app.id] = error ? appDisks.appsDataDisk : result.filesystem; // ignore any errors
            appDisks[app.id] = error ? appsDataDisk : result.filesystem; // ignore any errors
            iteratorDone();
        });
    }, function (error) {

@@ -96,8 +98,6 @@ function getDisks(callback) {
        dfFileAsync.bind(null, paths.APPS_DATA_DIR),
        dfFileAsync.bind(null, info.DockerRootDir),
        getBackupDisk,
        getAppDisks,
        getVolumeDisks,
    ], function (error, values) {
        if (error) return callback(new BoxError(BoxError.FS_ERROR, error));

@@ -112,11 +112,21 @@ function getDisks(callback) {
            appsDataDisk: values[3].filesystem,
            dockerDataDisk: values[4].filesystem,
            backupsDisk: values[5],
            apps: values[6],
            volumes: values[7]
            apps: {}, // filled below
            volumes: {} // filled below
        };

        callback(null, disks);
        async.series([
            getAppDisks.bind(null, disks.appsDataDisk),
            getVolumeDisks.bind(null, disks.appsDataDisk)
        ], function (error, values) {
            if (error) return callback(new BoxError(BoxError.FS_ERROR, error));

            disks.apps = values[0],
            disks.volumes = values[1];

            callback(null, disks);
        });
    });
    });
}
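
Part of what this refactor fixes: the old fallback read appsDataDisk off the freshly created, still-empty result map, so it was always undefined. A minimal illustration:

    // how the old fallback failed: the map starts out empty
    let volumeDisks = {};
    const fallback = volumeDisks.appsDataDisk; // undefined — never the apps data disk
    // the new code passes disks.appsDataDisk in as an argument once it is actually known
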
src/tasks.js

@@ -1,20 +1,20 @@
'use strict';

exports = module.exports = {
    get: get,
    add: add,
    update: update,
    setCompleted: setCompleted,
    setCompletedByType: setCompletedByType,
    listByTypePaged: listByTypePaged,
    get,
    add,
    update,
    setCompleted,
    setCompletedByType,
    listByTypePaged,

    getLogs: getLogs,
    getLogs,

    startTask: startTask,
    stopTask: stopTask,
    stopAllTasks: stopAllTasks,
    startTask,
    stopTask,
    stopAllTasks,

    removePrivateFields: removePrivateFields,
    removePrivateFields,

    // task types. if you add a task here, fill up the function table in taskworker and dashboard client.js
    TASK_APP: 'app',

@@ -25,6 +25,7 @@ exports = module.exports = {
    TASK_CLEAN_BACKUPS: 'cleanBackups',
    TASK_SYNC_EXTERNAL_LDAP: 'syncExternalLdap',
    TASK_CHANGE_MAIL_LOCATION: 'changeMailLocation',
    TASK_SYNC_DNS_RECORDS: 'syncDnsRecords',

    // error codes
    ESTOPPED: 'stopped',

@@ -217,7 +218,7 @@ function stopAllTasks(callback) {
    debug('stopTask: stopping all tasks');

    gTasks = {}; // this signals startTask() to not set completion status as "crashed"
    shell.sudo('stopTask', [ STOP_TASK_CMD, 'all' ], {}, callback);
    shell.sudo('stopTask', [ STOP_TASK_CMD, 'all' ], { cwd: paths.baseDir() }, callback);
}

function listByTypePaged(type, page, perPage, callback) {
@@ -7,6 +7,7 @@ var apptask = require('./apptask.js'),
    backups = require('./backups.js'),
    cloudron = require('./cloudron.js'),
    database = require('./database.js'),
    domains = require('./domains.js'),
    externalLdap = require('./externalldap.js'),
    fs = require('fs'),
    mail = require('./mail.js'),

@@ -25,6 +26,7 @@ const TASKS = { // indexed by task type
    cleanBackups: backups.cleanup,
    syncExternalLdap: externalLdap.sync,
    changeMailLocation: mail.changeLocation,
    syncDnsRecords: domains.syncDnsRecords,

    _identity: (arg, progressCallback, callback) => callback(null, arg),
    _error: (arg, progressCallback, callback) => callback(new Error(`Failed for arg: ${arg}`)),
@@ -227,7 +227,7 @@ describe('apptask', function () {
        .post('/2013-04-01/hostedzone/ZONEID/rrset/')
        .reply(200, js2xml('ChangeResourceRecordSetsResponse', { ChangeInfo: { Id: 'RRID', Status: 'INSYNC' } }));

    apptask._registerSubdomains(APP, true /* overwrite */, function (error) {
    apptask._registerLocations(APP, true /* overwrite */, function (error) {
        expect(error).to.be(null);
        expect(awsScope.isDone()).to.be.ok();
        done();
@@ -1863,6 +1863,19 @@ describe('database', function () {
        });
    });

    it('list all mailboxes succeeds', function (done) {
        mailboxdb.listAllMailboxes(1, 10, function (error, mailboxes) {
            expect(error).to.be(null);
            expect(mailboxes.length).to.be(2);
            expect(mailboxes[0].name).to.be('girish');
            expect(mailboxes[1].name).to.be('support');
            expect(mailboxes[1].domain).to.be(DOMAIN_0.domain);

            done();
        });
    });


    it('can get aliases of name', function (done) {
        mailboxdb.getAliasesForName('support', DOMAIN_0.domain, function (error, results) {
            expect(error).to.be(null);
@@ -85,8 +85,6 @@ function getLanguages(callback) {
        var jsonFiles = result.filter(function (file) { return path.extname(file) === '.json'; });
        languages = jsonFiles.map(function (file) { return path.basename(file, '.json'); });

        debug('Languages found:', jsonFiles);

        callback(null, languages);
    });
}
@@ -35,7 +35,7 @@ function checkAppUpdates(options, callback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug('Checking App Updates');
    debug('checkAppUpdates: checking for updates');

    let state = getUpdateInfo();
    let newState = { }; // create new state so that old app ids are removed

@@ -50,7 +50,7 @@ function checkAppUpdates(options, callback) {

        appstore.getAppUpdate(app, options, function (error, updateInfo) {
            if (error) {
                debug('Error getting app update info for %s', app.id, error);
                debug('checkAppUpdates: Error getting app update info for %s', app.id, error);
                return iteratorDone(); // continue to next
            }

@@ -59,10 +59,12 @@ function checkAppUpdates(options, callback) {
            newState[app.id] = updateInfo;

            if (safe.query(state[app.id], 'manifest.version') === updateInfo.manifest.version) {
                debug(`Skipping app update notification of ${app.id} since user was already notified of ${updateInfo.manifest.version}`);
                debug(`checkAppUpdates: Skipping app update notification of ${app.id} since user was already notified of ${updateInfo.manifest.version}`);
                return iteratorDone();
            }

            debug(`checkAppUpdates: ${app.id} can be updated to ${updateInfo.manifest.id}@${updateInfo.manifest.version}`);

            pendingNotifications.push({ app, updateInfo });
            iteratorDone();
        });

@@ -80,7 +82,7 @@ function checkBoxUpdates(options, callback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug('Checking Box Updates');
    debug('checkBoxUpdates: checking for updates');

    appstore.getBoxUpdate(options, function (error, updateInfo) {
        if (error) return callback(error);

@@ -92,14 +94,17 @@ function checkBoxUpdates(options, callback) {
            delete state.box;
            setUpdateInfo(state);
        }
        debug('checkBoxUpdates: no updates');
        return callback(null);
    }

    if (state.box && state.box.version === updateInfo.version) {
        debug('Skipping notification of box update as user was already notified');
        debug(`checkBoxUpdates: Skipping notification of box update ${updateInfo.version} as user was already notified`);
        return callback();
    }

    debug(`checkBoxUpdates: ${updateInfo.version} is available`);

    const changelog = updateInfo.changelog.map((m) => `* ${m}\n`).join('');

    const message = `Changelog:\n${changelog}\n\nGo to the settings view to update.\n\n`;
@@ -1,8 +1,8 @@
'use strict';

exports = module.exports = {
    updateToLatest: updateToLatest,
    update: update
    updateToLatest,
    update
};

var apps = require('./apps.js'),

@@ -23,6 +23,7 @@ var apps = require('./apps.js'),
    paths = require('./paths.js'),
    safe = require('safetydance'),
    semver = require('semver'),
    settings = require('./settings.js'),
    shell = require('./shell.js'),
    tasks = require('./tasks.js'),
    updateChecker = require('./updatechecker.js');

@@ -98,9 +99,10 @@ function verifyUpdateInfo(versionsFile, updateInfo, callback) {
    assert.strictEqual(typeof updateInfo, 'object');
    assert.strictEqual(typeof callback, 'function');

    var releases = safe.JSON.parse(safe.fs.readFileSync(versionsFile, 'utf8')) || { };
    if (!releases[constants.VERSION] || !releases[constants.VERSION].next) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'No version info'));
    var nextVersion = releases[constants.VERSION].next;
    const releases = safe.JSON.parse(safe.fs.readFileSync(versionsFile, 'utf8')) || {};
    if (!releases[constants.VERSION]) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `No version info for ${constants.VERSION}`));
    if (!releases[constants.VERSION].next) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `No next version info for ${constants.VERSION}`));
    const nextVersion = releases[constants.VERSION].next;
    if (typeof releases[nextVersion] !== 'object' || !releases[nextVersion]) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'No next version info'));
    if (releases[nextVersion].sourceTarballUrl !== updateInfo.sourceTarballUrl) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Version info mismatch'));

@@ -219,21 +221,27 @@ function updateToLatest(options, auditSource, callback) {
    error = locker.lock(locker.OP_BOX_UPDATE);
    if (error) return callback(new BoxError(BoxError.BAD_STATE, `Cannot update now: ${error.message}`));

    tasks.add(tasks.TASK_UPDATE, [ boxUpdateInfo, options ], function (error, taskId) {
    settings.getBackupConfig(function (error, backupConfig) {
        if (error) return callback(error);

        eventlog.add(eventlog.ACTION_UPDATE, auditSource, { taskId, boxUpdateInfo });
        const memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;

        tasks.startTask(taskId, { timeout: 20 * 60 * 60 * 1000 /* 20 hours */ }, (error) => {
            locker.unlock(locker.OP_BOX_UPDATE);
        tasks.add(tasks.TASK_UPDATE, [ boxUpdateInfo, options ], function (error, taskId) {
            if (error) return callback(error);

            debug('Update failed with error', error);
            eventlog.add(eventlog.ACTION_UPDATE, auditSource, { taskId, boxUpdateInfo });

            const timedOut = error.code === tasks.ETIMEOUT;
            eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut });
            tasks.startTask(taskId, { timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15, memoryLimit }, (error) => {
                locker.unlock(locker.OP_BOX_UPDATE);

                debug('Update failed with error', error);

                const timedOut = error.code === tasks.ETIMEOUT;
                eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut });
            });

            callback(null, taskId);
        });

        callback(null, taskId);
    });
    });
}
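
The derived memoryLimit converts the backup config value (bytes) to MB with a 400 MB floor; a quick check of the arithmetic:

    const taskMemoryLimitMb = (bytes) => Math.max(bytes / 1024 / 1024, 400);
    console.log(taskMemoryLimitMb(1073741824)); // 1024 — a 1 GiB backup memoryLimit
    console.log(taskMemoryLimitMb(104857600));  // 400  — 100 MiB floors to the minimum
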
@@ -63,7 +63,7 @@ function add(name, hostPath, auditSource, callback) {
    error = validateHostPath(hostPath);
    if (error) return callback(error);

    const id = uuid();
    const id = uuid.v4();

    volumedb.add(id, name, hostPath, function (error) {
        if (error) return callback(error);
src/wellknown.js (new file)

@@ -0,0 +1,43 @@
'use strict';

exports = module.exports = {
    get
};

const assert = require('assert'),
    BoxError = require('./boxerror.js'),
    domains = require('./domains.js'),
    ejs = require('ejs'),
    fs = require('fs'),
    mail = require('./mail.js'),
    settings = require('./settings.js');

const MAIL_AUTOCONFIG_EJS = fs.readFileSync(__dirname + '/autoconfig.xml.ejs', { encoding: 'utf8' });

function get(domain, location, callback) {
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof callback, 'function');

    if (location === 'autoconfig/mail/config-v1.1.xml') { // this also gets a ?emailaddress
        mail.getDomain(domain, function (error, mailDomain) {
            if (error) return callback(new BoxError(BoxError.NOT_FOUND, error.message));
            if (!mailDomain.enabled) return callback(new BoxError(BoxError.NOT_FOUND, 'Email not enabled'));

            const autoconfig = ejs.render(MAIL_AUTOCONFIG_EJS, { domain, mailFqdn: settings.mailFqdn() });

            callback(null, { type: 'text/xml', body: autoconfig });
        });
    } else if (location === 'host-meta' || location === 'matrix/server') {
        const type = location === 'host-meta' ? 'text/xml' : 'application/json';

        domains.get(domain, function (error, domainObject) {
            if (error) return callback(error);
            if (!domainObject.wellKnown || !(location in domainObject.wellKnown)) return callback(new BoxError(BoxError.NOT_FOUND, 'No custom well-known config'));

            callback(null, { type, body: domainObject.wellKnown[location] });
        });
    } else {
        callback(new BoxError(BoxError.NOT_FOUND, 'No custom well-known config'));
    }
}
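
A minimal sketch of how the new module resolves the mail autoconfig location (the domain is a placeholder; serving this under /.well-known/ is implied by the routes/wellknown.js change above):

    'use strict';

    const wellknown = require('./wellknown.js');

    wellknown.get('example.com', 'autoconfig/mail/config-v1.1.xml', function (error, result) {
        if (error) return console.error(error.message); // NOT_FOUND when email is disabled for the domain
        console.log(result.type); // 'text/xml'
        console.log(result.body); // autoconfig.xml.ejs rendered with { domain, mailFqdn }
    });
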