diff --git a/setup/start.sh b/setup/start.sh
index 5dfc1b087..49c75e714 100755
--- a/setup/start.sh
+++ b/setup/start.sh
@@ -6,10 +6,12 @@ echo "==> Cloudron Start"
 
 readonly USER="yellowtent"
 readonly DATA_FILE="/root/user_data.img"
-readonly BOX_SRC_DIR="/home/${USER}/box"
-readonly DATA_DIR="/home/${USER}/data"
-readonly CONFIG_DIR="/home/${USER}/configs"
-readonly SETUP_PROGRESS_JSON="/home/${USER}/setup/website/progress.json"
+readonly HOME_DIR="/home/${USER}"
+readonly BOX_SRC_DIR="${HOME_DIR}/box"
+readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
+readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
+readonly CONFIG_DIR="${HOME_DIR}/configs"
+readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
 
 readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
 
@@ -108,7 +110,6 @@ fi
 
 # keep these in sync with paths.js
 echo "==> Ensuring directories"
-btrfs subvolume create "${DATA_DIR}/box" &> /dev/null || true
 if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
     # Migrate mail data to new format
     docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
@@ -117,9 +118,6 @@ if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
     [[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
     rm -rf "${DATA_DIR}/box/mail"
 fi
-mkdir -p "${DATA_DIR}/box/appicons"
-mkdir -p "${DATA_DIR}/box/certs"
-mkdir -p "${DATA_DIR}/box/acme" # acme keys
 mkdir -p "${DATA_DIR}/graphite"
 mkdir -p "${DATA_DIR}/mail/dkim"
 
@@ -131,6 +129,15 @@ mkdir -p "${DATA_DIR}/addons/mail"
 mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
 mkdir -p "${DATA_DIR}/acme"
 
+if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
+    # Migrate box data out of data volume
+    mv "${DATA_DIR}/box" "${BOX_DATA_DIR}"
+    btrfs subvolume delete "${DATA_DIR}/box"
+fi
+mkdir -p "${BOX_DATA_DIR}/appicons"
+mkdir -p "${BOX_DATA_DIR}/certs"
+mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
+
 echo "==> Configuring journald"
 sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
     -e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
@@ -207,7 +214,7 @@ cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
 cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
 
 # bookkeep the version as part of data
-echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${DATA_DIR}/box/version"
+echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
 
 # remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
 # which currently fails because these are readonly fs
@@ -234,14 +241,15 @@ if [[ -n "${arg_restore_url}" ]]; then
     echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
 
     while true; do
-        if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" | tar -zxf - --overwrite -C "${DATA_DIR}"; then break; fi
+        if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
+            | tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
         echo "Failed to download data, trying again"
     done
 
     set_progress "35" "Setting up MySQL"
 
-    if [[ -f "${DATA_DIR}/box/box.mysqldump" ]]; then
+    if [[ -f "${BOX_DATA_DIR}/box.mysqldump" ]]; then
         echo "==> Importing existing database into MySQL"
-        mysql -u root -p${mysql_root_password} box < "${DATA_DIR}/box/box.mysqldump"
+        mysql -u root -p${mysql_root_password} box < "${BOX_DATA_DIR}/box.mysqldump"
     fi
 fi
@@ -290,7 +298,7 @@ CONF_END
 echo "==> Changing ownership"
 chown "${USER}:${USER}" -R "${CONFIG_DIR}"
 chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
-chown "${USER}:${USER}" -R "${DATA_DIR}/box"
+chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
 chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
 chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
 chown "${USER}:${USER}" "${DATA_DIR}"
diff --git a/setup/start/cloudron-resize-fs.sh b/setup/start/cloudron-resize-fs.sh
index 4bf770895..208ef16be 100755
--- a/setup/start/cloudron-resize-fs.sh
+++ b/setup/start/cloudron-resize-fs.sh
@@ -18,7 +18,7 @@ readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change th
 readonly app_count=$((${physical_memory} / 200)) # estimated app count
 readonly disk_size_bytes=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf "%d", $5 }') # can't rely on fdisk human readable units, using bytes instead
 readonly disk_size=$((${disk_size_bytes}/1024/1024))
-readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp
+readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
 readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
 
 echo "Disk device: ${disk_device}"
diff --git a/src/apps.js b/src/apps.js
index 661783264..df7f24373 100644
--- a/src/apps.js
+++ b/src/apps.js
@@ -563,7 +563,7 @@ function install(data, auditSource, callback) {
         if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
         if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
 
-        // save cert to data/box/certs
+        // save cert to boxdata/certs
        if (cert && key) {
             if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
             if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
@@ -636,7 +636,7 @@ function configure(appId, data, auditSource, callback) {
             if (error) return callback(error);
         }
 
-        // save cert to data/box/certs. TODO: move this to apptask when we have a real task queue
+        // save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
         if ('cert' in data && 'key' in data) {
             if (data.cert && data.key) {
                 error = certificates.validateCertificate(data.cert, data.key, config.appFqdn(location));
diff --git a/src/paths.js b/src/paths.js
index dd2009c7b..46c44b17b 100644
--- a/src/paths.js
+++ b/src/paths.js
@@ -9,7 +9,7 @@ exports = module.exports = {
     INFRA_VERSION_FILE: path.join(config.baseDir(), 'data/INFRA_VERSION'),
 
     DATA_DIR: path.join(config.baseDir(), 'data'),
-    BOX_DATA_DIR: path.join(config.baseDir(), 'data/box'),
+    BOX_DATA_DIR: path.join(config.baseDir(), 'boxdata'),
 
     ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'data/acme'),
     ADDON_CONFIG_DIR: path.join(config.baseDir(), 'data/addons'),
@@ -20,10 +20,10 @@ exports = module.exports = {
     NGINX_CERT_DIR: path.join(config.baseDir(), 'data/nginx/cert'),
 
     // this is not part of appdata because an icon may be set before install
-    ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'data/box/acme/acme.key'),
-    APP_ICONS_DIR: path.join(config.baseDir(), 'data/box/appicons'),
-    APP_CERTS_DIR: path.join(config.baseDir(), 'data/box/certs'),
-    CLOUDRON_AVATAR_FILE: path.join(config.baseDir(), 'data/box/avatar.png'),
-    FIRST_RUN_FILE: path.join(config.baseDir(), 'data/box/first_run'),
-    UPDATE_CHECKER_FILE: path.join(config.baseDir(), 'data/box/updatechecker.json')
+    ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'),
+    APP_ICONS_DIR: path.join(config.baseDir(), 'boxdata/appicons'),
+    APP_CERTS_DIR: path.join(config.baseDir(), 'boxdata/certs'),
+    CLOUDRON_AVATAR_FILE: path.join(config.baseDir(), 'boxdata/avatar.png'),
+    FIRST_RUN_FILE: path.join(config.baseDir(), 'boxdata/first_run'),
+    UPDATE_CHECKER_FILE: path.join(config.baseDir(), 'boxdata/updatechecker.json')
 };
diff --git a/src/scripts/backupbox.sh b/src/scripts/backupbox.sh
index 30b068bab..37e1404bc 100755
--- a/src/scripts/backupbox.sh
+++ b/src/scripts/backupbox.sh
@@ -44,21 +44,15 @@ elif [[ "$1" == "filesystem" ]]; then
 fi
 
 # perform backup
-BOX_DATA_DIR="${HOME}/data/box"
+BOX_DATA_DIR="${HOME}/boxdata"
 MAIL_DATA_DIR="${HOME}/data/mail"
-snapshot_dir="${HOME}/data/snapshots"
-box_snapshot_dir="${snapshot_dir}/box"
-mail_snapshot_dir="${snapshot_dir}/mail"
+mail_snapshot_dir="${HOME}/data/snapshots/mail"
 
 echo "Creating MySQL dump"
 mysqldump -u root -ppassword --single-transaction --routines --triggers box > "${BOX_DATA_DIR}/box.mysqldump"
 
-echo "Snapshotting box"
-btrfs subvolume delete "${box_snapshot_dir}" || true
-btrfs subvolume snapshot -r "${BOX_DATA_DIR}" "${box_snapshot_dir}"
-
 echo "Snapshotting mail"
-btrfs subvolume delete "${mail_snapshot_dir}" || true
+btrfs subvolume delete "${mail_snapshot_dir}" &> /dev/null || true
 btrfs subvolume snapshot -r "${MAIL_DATA_DIR}" "${mail_snapshot_dir}"
 
 # will be checked at the end
@@ -77,7 +71,7 @@ if [[ "$1" == "s3" ]]; then
 
     # use aws instead of curl because curl will always read entire stream memory to set Content-Length
     # aws will do multipart upload
-    if tar -czf - -C "${snapshot_dir}" box mail \
+    if tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
         | openssl aes-256-cbc -e -pass "pass:${password}" \
         | aws ${optional_args} s3 cp - "${s3_url}" 2>"${error_log}"; then
         break
@@ -89,11 +83,11 @@
 elif [[ "$1" == "filesystem" ]]; then
     mkdir -p $(dirname "${backup_folder}/${backup_fileName}")
 
-    tar -czf - -C "${snapshot_dir}" box mail | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
+    tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
+        | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
 fi
 
 echo "Deleting backup snapshot"
-btrfs subvolume delete "${box_snapshot_dir}"
 btrfs subvolume delete "${mail_snapshot_dir}"
 
 if [[ ${try} -eq 5 ]]; then
diff --git a/src/test/setupTest b/src/test/setupTest
index 1fec7ba73..d610a74c3 100755
--- a/src/test/setupTest
+++ b/src/test/setupTest
@@ -10,7 +10,7 @@ readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd)"
 rm -rf $HOME/.cloudron_test 2>/dev/null || true # some of those docker container data requires sudo to be removed
 mkdir -p $HOME/.cloudron_test
 cd $HOME/.cloudron_test
-mkdir -p data/appdata data/box/appicons data/mail data/addons/mail data/nginx/cert data/nginx/applications data/collectd/collectd.conf.d data/addons configs data/box/certs data/mail/dkim/localhost data/mail/dkim/foobar.com
+mkdir -p data/appdata boxdata/appicons data/mail data/addons/mail data/nginx/cert data/nginx/applications data/collectd/collectd.conf.d data/addons configs boxdata/certs data/mail/dkim/localhost data/mail/dkim/foobar.com
 
 # put cert
 openssl req -x509 -newkey rsa:2048 -keyout data/nginx/cert/host.key -out data/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes