diff --git a/setup/splashpage.sh b/setup/splashpage.sh index b9389544a..a2b6a7c0d 100755 --- a/setup/splashpage.sh +++ b/setup/splashpage.sh @@ -6,12 +6,12 @@ readonly SETUP_WEBSITE_DIR="/home/yellowtent/setup/website" script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly box_src_dir="$(realpath ${script_dir}/..)" -readonly DATA_DIR="/home/yellowtent/data" +readonly PLATFORM_DATA_DIR="/home/yellowtent/platformdata" readonly ADMIN_LOCATION="my" # keep this in sync with constants.js echo "Setting up nginx update page" -if [[ ! -f "${DATA_DIR}/nginx/applications/admin.conf" ]]; then +if [[ ! -f "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" ]]; then echo "No admin.conf found. This Cloudron has no domain yet. Skip splash setup" exit fi @@ -29,16 +29,16 @@ cp -r "${script_dir}/splash/website/"* "${SETUP_WEBSITE_DIR}" # create nginx config readonly current_infra=$(node -e "console.log(require('${script_dir}/../src/infra_version.js').version);") existing_infra="none" -[[ -f "${DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${DATA_DIR}/INFRA_VERSION', 'utf8')).version);") +[[ -f "${PLATFORM_DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${PLATFORM_DATA_DIR}/INFRA_VERSION', 'utf8')).version);") if [[ "${arg_retire_reason}" != "" || "${existing_infra}" != "${current_infra}" ]]; then echo "Showing progress bar on all subdomains in retired mode or infra update. 
retire: ${arg_retire_reason} existing: ${existing_infra} current: ${current_infra}" - rm -f ${DATA_DIR}/nginx/applications/* + rm -f ${PLATFORM_DATA_DIR}/nginx/applications/* ${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \ - -O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf" + -O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" else echo "Show progress bar only on admin domain for normal update" ${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \ - -O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf" + -O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" fi if [[ "${arg_retire_reason}" == "migrate" ]]; then diff --git a/setup/start.sh b/setup/start.sh index 22fff0918..35c2bc1e5 100755 --- a/setup/start.sh +++ b/setup/start.sh @@ -5,10 +5,13 @@ set -eu -o pipefail echo "==> Cloudron Start" readonly USER="yellowtent" -readonly DATA_FILE="/root/user_data.img" +# FIXME Remove this and the btrfs aspect +# readonly 
DATA_FILE="/root/user_data.img" readonly HOME_DIR="/home/${USER}" readonly BOX_SRC_DIR="${HOME_DIR}/box" -readonly DATA_DIR="${HOME_DIR}/data" # app and platform data +readonly OLD_DATA_DIR="${HOME_DIR}/data"; +readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data +readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data readonly CONFIG_DIR="${HOME_DIR}/configs" readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json" @@ -66,42 +69,38 @@ if [[ "${arg_provider}" == "caas" ]]; then systemctl reload sshd fi -echo "==> Setup btrfs data" -if [[ ! -d "${DATA_DIR}" ]]; then - echo "==> Mounting loopback btrfs" - truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service) - mkfs.btrfs -L UserDataHome "${DATA_FILE}" - mkdir -p "${DATA_DIR}" - mount -t btrfs -o loop,nosuid "${DATA_FILE}" ${DATA_DIR} -fi - # keep these in sync with paths.js echo "==> Ensuring directories" -if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then - # Migrate mail data to new format - docker stop mail || true # otherwise the move below might fail if mail container writes in the middle - rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory - btrfs subvolume create "${DATA_DIR}/mail" - [[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail" - rm -rf "${DATA_DIR}/box/mail" +if [[ ! 
-d "${PLATFORM_DATA_DIR}/mail" ]]; then + if [[ -d "${OLD_DATA_DIR}/mail" ]]; then + echo "==> Migrate old mail data" + # Migrate mail data to new format + docker stop mail || true # otherwise the move below might fail if mail container writes in the middle + mkdir -p "${PLATFORM_DATA_DIR}" && mv "${OLD_DATA_DIR}/mail" "${PLATFORM_DATA_DIR}/mail" # this used to be mail container's run directory + else + echo "==> Create new mail data dir" + mkdir -p "${PLATFORM_DATA_DIR}/mail" + fi fi -mkdir -p "${DATA_DIR}/graphite" -mkdir -p "${DATA_DIR}/mail/dkim" +mkdir -p "${PLATFORM_DATA_DIR}/graphite" +mkdir -p "${PLATFORM_DATA_DIR}/mail/dkim" -mkdir -p "${DATA_DIR}/mysql" -mkdir -p "${DATA_DIR}/postgresql" -mkdir -p "${DATA_DIR}/mongodb" -mkdir -p "${DATA_DIR}/snapshots" -mkdir -p "${DATA_DIR}/addons/mail" -mkdir -p "${DATA_DIR}/collectd/collectd.conf.d" -mkdir -p "${DATA_DIR}/acme" +mkdir -p "${PLATFORM_DATA_DIR}/mysql" +mkdir -p "${PLATFORM_DATA_DIR}/postgresql" +mkdir -p "${PLATFORM_DATA_DIR}/mongodb" +mkdir -p "${PLATFORM_DATA_DIR}/snapshots" +mkdir -p "${PLATFORM_DATA_DIR}/addons/mail" +mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d" +mkdir -p "${PLATFORM_DATA_DIR}/acme" +mkdir -p "${APPS_DATA_DIR}" mkdir -p "${BOX_DATA_DIR}" -if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then - # Migrate box data out of data volume - mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}" - btrfs subvolume delete "${DATA_DIR}/box" -fi +# FIXME THIS IS NOT NEEDED ANYMORE I GUESS?? 
unless we support restore from any backup version +# if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then +# # Migrate box data out of data volume +# mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}" +# btrfs subvolume delete "${DATA_DIR}/box" +# fi mkdir -p "${BOX_DATA_DIR}/appicons" mkdir -p "${BOX_DATA_DIR}/certs" mkdir -p "${BOX_DATA_DIR}/acme" # acme keys @@ -156,18 +155,18 @@ cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER} echo "==> Configuring collectd" rm -rf /etc/collectd -ln -sfF "${DATA_DIR}/collectd" /etc/collectd -cp "${script_dir}/start/collectd.conf" "${DATA_DIR}/collectd/collectd.conf" +ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd +cp "${script_dir}/start/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf" systemctl restart collectd echo "==> Configuring nginx" # link nginx config to system config unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx -ln -s "${DATA_DIR}/nginx" /etc/nginx -mkdir -p "${DATA_DIR}/nginx/applications" -mkdir -p "${DATA_DIR}/nginx/cert" -cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf" -cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types" +ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx +mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications" +mkdir -p "${PLATFORM_DATA_DIR}/nginx/cert" +cp "${script_dir}/start/nginx/nginx.conf" "${PLATFORM_DATA_DIR}/nginx/nginx.conf" +cp "${script_dir}/start/nginx/mime.types" "${PLATFORM_DATA_DIR}/nginx/mime.types" if ! grep -q "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then # default nginx service file does not restart on crash echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service @@ -178,11 +177,6 @@ systemctl start nginx # bookkeep the version as part of data echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version" -# remove old snapshots. 
if we do want to keep this around, we will have to fix the chown -R below -# which currently fails because these are readonly fs -echo "==> Cleaning up snapshots" -find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete - # restart mysql to make sure it has latest config if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then # wait for all running mysql jobs @@ -208,7 +202,7 @@ if [[ -n "${arg_restore_url}" ]]; then while true; do if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \ - | tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi + | tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,platformdata/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi echo "Failed to download data, trying again" done @@ -263,11 +257,11 @@ CONF_END echo "==> Changing ownership" chown "${USER}:${USER}" -R "${CONFIG_DIR}" -chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme" +chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" chown "${USER}:${USER}" -R "${BOX_DATA_DIR}" -chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys -chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true -chown "${USER}:${USER}" "${DATA_DIR}" +chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys +chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true +chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}" echo "==> Adding automated configs" if [[ ! 
-z "${arg_backup_config}" ]]; then diff --git a/setup/start/cloudron-resize-fs.sh b/setup/start/cloudron-resize-fs.sh index f4a3096af..5081d955a 100755 --- a/setup/start/cloudron-resize-fs.sh +++ b/setup/start/cloudron-resize-fs.sh @@ -2,10 +2,7 @@ set -eu -o pipefail -readonly USER_HOME="/home/yellowtent" readonly APPS_SWAP_FILE="/apps.swap" -readonly USER_DATA_FILE="/root/user_data.img" -readonly USER_DATA_DIR="/home/yellowtent/data" # all sizes are in mb readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }') @@ -44,14 +41,3 @@ if [[ ${needed_swap_size} -gt 0 ]]; then else echo "Swap requirements already met" fi - -# see start.sh for the initial default size of 8gb. On small disks the calculation might be lower than 8gb resulting in a failure to resize here. -echo "Resizing data volume" -home_data_size=$((disk_size - system_size - swap_size - ext4_reserved)) -echo "Resizing up btrfs user data to size ${home_data_size}M" -umount "${USER_DATA_DIR}" || true -# Do not preallocate (non-sparse). Doing so overallocates for data too much in advance and causes problems when using many apps with smaller data -# fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data -truncate -s "${home_data_size}m" "${USER_DATA_FILE}" # this will shrink it if the file had existed. 
this is useful when running this script on a live system -mount -t btrfs -o loop,nosuid "${USER_DATA_FILE}" ${USER_DATA_DIR} -btrfs filesystem resize max "${USER_DATA_DIR}" diff --git a/setup/start/nginx/nginx.conf b/setup/start/nginx/nginx.conf index 12771eeb2..f98581ab7 100644 --- a/setup/start/nginx/nginx.conf +++ b/setup/start/nginx/nginx.conf @@ -51,7 +51,7 @@ http { # acme challenges location /.well-known/acme-challenge/ { default_type text/plain; - alias /home/yellowtent/data/acme/; + alias /home/yellowtent/platformdata/acme/; } location / { diff --git a/src/addons.js b/src/addons.js index 03c6fe18a..e2aee235a 100644 --- a/src/addons.js +++ b/src/addons.js @@ -211,7 +211,7 @@ function getBindsSync(app, addons) { for (var addon in addons) { switch (addon) { - case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break; + case 'localstorage': binds.push(path.join(paths.APPS_DATA_DIR, app.id, 'data') + ':/app/data:rw'); break; default: break; } } @@ -461,7 +461,7 @@ function backupMySql(app, options, callback) { callback = once(callback); // ChildProcess exit may or may not be called after error - var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mysqldump')); + var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump')); output.on('error', callback); var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'backup-prefix' : 'backup', app.id ]; @@ -477,7 +477,7 @@ function restoreMySql(app, options, callback) { debugApp(app, 'restoreMySql'); - var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mysqldump')); + var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump')); input.on('error', callback); var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 
'restore-prefix' : 'restore', app.id ]; @@ -526,7 +526,7 @@ function backupPostgreSql(app, options, callback) { callback = once(callback); // ChildProcess exit may or may not be called after error - var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump')); + var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump')); output.on('error', callback); var cmd = [ '/addons/postgresql/service.sh', 'backup', app.id ]; @@ -542,7 +542,7 @@ function restorePostgreSql(app, options, callback) { debugApp(app, 'restorePostgreSql'); - var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump')); + var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump')); input.on('error', callback); var cmd = [ '/addons/postgresql/service.sh', 'restore', app.id ]; @@ -592,7 +592,7 @@ function backupMongoDb(app, options, callback) { callback = once(callback); // ChildProcess exit may or may not be called after error - var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump')); + var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump')); output.on('error', callback); var cmd = [ '/addons/mongodb/service.sh', 'backup', app.id ]; @@ -608,7 +608,7 @@ function restoreMongoDb(app, options, callback) { debugApp(app, 'restoreMongoDb'); - var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump')); + var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump')); input.on('error', callback); var cmd = [ '/addons/mongodb/service.sh', 'restore', app.id ]; @@ -624,7 +624,7 @@ function setupRedis(app, options, callback) { var redisPassword = generatePassword(128, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly) var redisVarsFile = path.join(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh'); - var redisDataDir = 
path.join(paths.DATA_DIR, app.id + '/redis'); + var redisDataDir = path.join(paths.APPS_DATA_DIR, app.id + '/redis'); if (!safe.fs.writeFileSync(redisVarsFile, 'REDIS_PASSWORD=' + redisPassword)) { return callback(new Error('Error writing redis config')); diff --git a/src/backups.js b/src/backups.js index 508570efe..5d8b91999 100644 --- a/src/backups.js +++ b/src/backups.js @@ -318,7 +318,7 @@ function backupApp(app, manifest, prefix, callback) { appConfig.manifest = manifest; backupFunction = createNewAppBackup.bind(null, app, manifest, prefix); - if (!safe.fs.writeFileSync(path.join(paths.DATA_DIR, app.id + '/config.json'), JSON.stringify(appConfig), 'utf8')) { + if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(appConfig), 'utf8')) { return callback(safe.error); } } diff --git a/src/cloudron.js b/src/cloudron.js index 0de730fe5..8056d7537 100644 --- a/src/cloudron.js +++ b/src/cloudron.js @@ -868,8 +868,7 @@ function checkDiskSpace(callback) { } var oos = entries.some(function (entry) { - return (entry.mount === paths.DATA_DIR && entry.capacity >= 0.90) || - (entry.mount === '/' && entry.available <= (1.25 * 1024 * 1024)); // 1.5G + return (entry.mount === '/' && entry.available <= (1.25 * 1024 * 1024)); // 1.5G }); debug('Disk space checked. 
ok: %s', !oos); diff --git a/src/paths.js b/src/paths.js index 46c44b17b..a7050be93 100644 --- a/src/paths.js +++ b/src/paths.js @@ -6,18 +6,20 @@ var config = require('./config.js'), // keep these values in sync with start.sh exports = module.exports = { CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'), - INFRA_VERSION_FILE: path.join(config.baseDir(), 'data/INFRA_VERSION'), + INFRA_VERSION_FILE: path.join(config.baseDir(), 'platformdata/INFRA_VERSION'), - DATA_DIR: path.join(config.baseDir(), 'data'), + OLD_DATA_DIR: path.join(config.baseDir(), 'data'), + PLATFORM_DATA_DIR: path.join(config.baseDir(), 'platformdata'), + APPS_DATA_DIR: path.join(config.baseDir(), 'appsdata'), BOX_DATA_DIR: path.join(config.baseDir(), 'boxdata'), - ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'data/acme'), - ADDON_CONFIG_DIR: path.join(config.baseDir(), 'data/addons'), - COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'data/collectd/collectd.conf.d'), - MAIL_DATA_DIR: path.join(config.baseDir(), 'data/mail'), - NGINX_CONFIG_DIR: path.join(config.baseDir(), 'data/nginx'), - NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'data/nginx/applications'), - NGINX_CERT_DIR: path.join(config.baseDir(), 'data/nginx/cert'), + ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'platformdata/acme'), + ADDON_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/addons'), + COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/collectd/collectd.conf.d'), + MAIL_DATA_DIR: path.join(config.baseDir(), 'platformdata/mail'), + NGINX_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx'), + NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx/applications'), + NGINX_CERT_DIR: path.join(config.baseDir(), 'platformdata/nginx/cert'), // this is not part of appdata because an icon may be set before install ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'), diff --git a/src/platform.js b/src/platform.js index 
f8e93ff96..2032a0d4f 100644 --- a/src/platform.js +++ b/src/platform.js @@ -146,7 +146,7 @@ function stopContainers(existingInfra, callback) { function startGraphite(callback) { const tag = infra.images.graphite.tag; - const dataDir = paths.DATA_DIR; + const dataDir = paths.PLATFORM_DATA_DIR; const cmd = `docker run --restart=always -d --name="graphite" \ --net cloudron \ @@ -166,11 +166,11 @@ function startGraphite(callback) { function startMysql(callback) { const tag = infra.images.mysql.tag; - const dataDir = paths.DATA_DIR; + const dataDir = paths.PLATFORM_DATA_DIR; const rootPassword = hat(8 * 128); const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256; - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mysql_vars.sh', + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mysql_vars.sh', 'MYSQL_ROOT_PASSWORD=' + rootPassword +'\nMYSQL_ROOT_HOST=172.18.0.1', 'utf8')) { return callback(new Error('Could not create mysql var file:' + safe.error.message)); } @@ -191,11 +191,11 @@ function startMysql(callback) { function startPostgresql(callback) { const tag = infra.images.postgresql.tag; - const dataDir = paths.DATA_DIR; + const dataDir = paths.PLATFORM_DATA_DIR; const rootPassword = hat(8 * 128); const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256; - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/postgresql_vars.sh', 'POSTGRESQL_ROOT_PASSWORD=' + rootPassword, 'utf8')) { + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/postgresql_vars.sh', 'POSTGRESQL_ROOT_PASSWORD=' + rootPassword, 'utf8')) { return callback(new Error('Could not create postgresql var file:' + safe.error.message)); } @@ -215,11 +215,11 @@ function startPostgresql(callback) { function startMongodb(callback) { const tag = infra.images.mongodb.tag; - const dataDir = paths.DATA_DIR; + const dataDir = paths.PLATFORM_DATA_DIR; const rootPassword = hat(8 * 128); const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 
200; - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mongodb_vars.sh', 'MONGODB_ROOT_PASSWORD=' + rootPassword, 'utf8')) { + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mongodb_vars.sh', 'MONGODB_ROOT_PASSWORD=' + rootPassword, 'utf8')) { return callback(new Error('Could not create mongodb var file:' + safe.error.message)); } @@ -248,7 +248,7 @@ function createMailConfig(callback) { var alertsTo = config.provider() === 'caas' ? [ 'support@cloudron.io' ] : [ ]; alertsTo.concat(error ? [] : owner.email).join(','); - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/mail.ini', + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/mail.ini', `mail_domain=${fqdn}\nmail_server_name=${mailFqdn}\nalerts_from=${alertsFrom}\nalerts_to=${alertsTo}`, 'utf8')) { return callback(new Error('Could not create mail var file:' + safe.error.message)); } @@ -264,15 +264,15 @@ function startMail(callback) { // mail container uses /app/data for backed up data and /run for restart-able data const tag = infra.images.mail.tag; - const dataDir = paths.DATA_DIR; + const dataDir = paths.PLATFORM_DATA_DIR; const memoryLimit = Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256); // admin and mail share the same certificate certificates.getAdminCertificate(function (error, cert, key) { if (error) return callback(error); - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message)); - if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + safe.error.message)); + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message)); + if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + 
safe.error.message)); settings.getMailConfig(function (error, mailConfig) { if (error) return callback(error); diff --git a/src/routes/test/apps-test.js b/src/routes/test/apps-test.js index e30c47a08..8453ec68c 100644 --- a/src/routes/test/apps-test.js +++ b/src/routes/test/apps-test.js @@ -747,7 +747,7 @@ describe('App installation', function () { }); it('installation - volume created', function (done) { - expect(fs.existsSync(paths.DATA_DIR + '/' + APP_ID)); + expect(fs.existsSync(paths.APPS_DATA_DIR + '/' + APP_ID)); done(); }); @@ -784,9 +784,9 @@ describe('App installation', function () { // support newer docker versions if (data.Volumes) { - expect(data.Volumes['/app/data']).to.eql(paths.DATA_DIR + '/' + APP_ID + '/data'); + expect(data.Volumes['/app/data']).to.eql(paths.APPS_DATA_DIR + '/' + APP_ID + '/data'); } else { - expect(data.Mounts.filter(function (mount) { return mount.Destination === '/app/data'; })[0].Source).to.eql(paths.DATA_DIR + '/' + APP_ID + '/data'); + expect(data.Mounts.filter(function (mount) { return mount.Destination === '/app/data'; })[0].Source).to.eql(paths.APPS_DATA_DIR + '/' + APP_ID + '/data'); } done(); @@ -1140,7 +1140,7 @@ describe('App installation', function () { }); it('uninstalled - volume destroyed', function (done) { - expect(!fs.existsSync(paths.DATA_DIR + '/' + APP_ID)); + expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP_ID)); done(); }); diff --git a/src/test/apptask-test.js b/src/test/apptask-test.js index 56ae9fa32..a70c58bc4 100644 --- a/src/test/apptask-test.js +++ b/src/test/apptask-test.js @@ -130,7 +130,7 @@ describe('apptask', function () { it('create volume', function (done) { apptask._createVolume(APP, function (error) { - expect(fs.existsSync(paths.DATA_DIR + '/' + APP.id + '/data')).to.be(true); + expect(fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id + '/data')).to.be(true); expect(error).to.be(null); done(); }); @@ -138,7 +138,7 @@ describe('apptask', function () { it('delete volume', function 
(done) { apptask._deleteVolume(APP, function (error) { - expect(!fs.existsSync(paths.DATA_DIR + '/' + APP.id + '/data')).to.be(true); + expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id + '/data')).to.be(true); expect(error).to.be(null); done(); }); @@ -241,5 +241,3 @@ describe('apptask', function () { }); }); }); - -