From d3fb244cef50240bfe33c46b153a68cb7c4f1da0 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 4 Aug 2015 16:29:49 -0700 Subject: [PATCH 001/234] list ldap as 0.0.25 change --- .gitattributes | 8 + .gitignore | 3 + .jshintrc | 7 + admin/admin | 340 ++++++++++ admin/cloudronLogin | 17 + images/createBoxTarball | 118 ++++ images/createDigitalOceanImage.sh | 196 ++++++ images/digitalOceanFunctions.sh | 180 ++++++ images/initializeBaseUbuntuImage.sh | 229 +++++++ npm-shrinkwrap.json | 923 ++++++++++++++++++++++++++++ package.json | 52 ++ release/CHANGES | 85 +++ release/images | 216 +++++++ release/parsechangelog.js | 31 + release/release | 591 ++++++++++++++++++ release/versions.json | 418 +++++++++++++ release/versionsformat.js | 65 ++ src/announce.js | 65 ++ src/installer.js | 100 +++ src/scripts/installer.sh | 62 ++ src/scripts/retire.sh | 26 + src/server.js | 293 +++++++++ src/test/installer-test.js | 332 ++++++++++ 23 files changed, 4357 insertions(+) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 .jshintrc create mode 100755 admin/admin create mode 100755 admin/cloudronLogin create mode 100755 images/createBoxTarball create mode 100755 images/createDigitalOceanImage.sh create mode 100644 images/digitalOceanFunctions.sh create mode 100755 images/initializeBaseUbuntuImage.sh create mode 100644 npm-shrinkwrap.json create mode 100644 package.json create mode 100644 release/CHANGES create mode 100755 release/images create mode 100644 release/parsechangelog.js create mode 100755 release/release create mode 100644 release/versions.json create mode 100644 release/versionsformat.js create mode 100644 src/announce.js create mode 100644 src/installer.js create mode 100755 src/scripts/installer.sh create mode 100755 src/scripts/retire.sh create mode 100755 src/server.js create mode 100644 src/test/installer-test.js diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..ef7233d0e --- /dev/null +++ 
b/.gitattributes @@ -0,0 +1,8 @@ +# following files are skipped when exporting using git archive +/release export-ignore +/images export-ignore +/admin export-ignore +test export-ignore +.gitattributes export-ignore +.gitignore export-ignore + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..767e764bb --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +node_modules/ +src/certs/server.key + diff --git a/.jshintrc b/.jshintrc new file mode 100644 index 000000000..ad6d169fb --- /dev/null +++ b/.jshintrc @@ -0,0 +1,7 @@ +{ + "node": true, + "browser": true, + "unused": true, + "globalstrict": true, + "predef": [ "angular", "$" ] +} diff --git a/admin/admin b/admin/admin new file mode 100755 index 000000000..7e79bb534 --- /dev/null +++ b/admin/admin @@ -0,0 +1,340 @@ +#!/usr/bin/env node + +'use strict'; + +var assert = require('assert'), + async = require('async'), + crypto = require('crypto'), + execSync = require('child_process').execSync, + fs = require('fs'), + https = require('https'), + os = require('os'), + path = require('path'), + program = require('commander'), + readlineSync = require('readline-sync'), + spawn = require('child_process').spawn, + SshClient = require('ssh2').Client, + superagent = require('superagent'), + util = require('util'); + +require('colors'); + +var SSH = 'root@%s -tt -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i %s'; +var sshKeyPath = path.join(process.env.HOME, '/.ssh/id_rsa_yellowtent'); + +if (!process.env['DIGITAL_OCEAN_TOKEN_DEV']) exit('Missing env variable DIGITAL_OCEAN_TOKEN_DEV'); +if (!process.env['DIGITAL_OCEAN_TOKEN_STAGING']) exit('Missing env variable DIGITAL_OCEAN_TOKEN_STAGING'); + +if (!fs.existsSync(sshKeyPath)) exit('Unable to find ssh key path. Searching for ' + sshKeyPath); + +// Allow self signed certs! +process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; + +function exit(error) { + if (error) console.log(error); + process.exit(error ? 
1 : 0); +} + +function getDroplets(token, callback) { + assert.strictEqual(typeof token, 'string'); + assert.strictEqual(typeof callback, 'function'); + + var droplets = []; + var nextPage = null; + + async.doWhilst(function (callback) { + var url = nextPage ? nextPage : 'https://api.digitalocean.com/v2/droplets'; + + superagent.get(url).set('Authorization', 'Bearer ' + token).end(function (error, result) { + if (error) return callback(error.message); + if (result.statusCode === 403) return callback('Invalid Digitalocean credentials'); + if (result.statusCode !== 200) return callback(util.format('Unable to get droplet list. %s - %s', result.statusCode, result.text)); + + nextPage = (result.body.links && result.body.links.pages) ? result.body.links.pages.next : null; + droplets = droplets.concat(result.body.droplets); + + callback(null); + }); + }, function () { return !!nextPage; }, function (error) { + if (error) return callback(error); + callback(null, droplets); + }); +} + +function selectCloudron(action) { + assert.strictEqual(typeof action, 'function'); + + var dropletsDev = []; + var dropletsStaging = []; + var dropletsProd = []; + + console.log('Getting droplet lists from dev and staging...'); + + getDroplets(process.env['DIGITAL_OCEAN_TOKEN_DEV'], function (error, result) { + if (error) exit(error); + + dropletsDev = result; + + getDroplets(process.env['DIGITAL_OCEAN_TOKEN_STAGING'], function (error, result) { + if (error) exit(error); + + dropletsStaging = result; + + getDroplets(process.env['DIGITAL_OCEAN_TOKEN_PROD'], function (error, result) { + if (error) exit(error); + + dropletsProd = result; + + console.log(); + console.log('Available Droplets on dev:'.bold); + dropletsDev.forEach(function (droplet, index) { + console.log('\t(%s)\t%s %s', index, droplet.name.cyan, droplet.networks.v4[0].ip_address); + }); + + console.log(); + console.log('Available Droplets on staging:'.bold); + dropletsStaging.forEach(function (droplet, index) { + 
console.log('\t(%s)\t%s %s', dropletsDev.length + index, droplet.name.cyan, droplet.networks.v4[0].ip_address); + }); + + console.log(); + console.log('Available Droplets on prod:'.bold); + dropletsProd.forEach(function (droplet, index) { + console.log('\t(%s)\t%s %s', dropletsDev.length + dropletsStaging.length + index, droplet.name.cyan, droplet.networks.v4[0].ip_address); + }); + + console.log(); + + var droplets = dropletsDev.concat(dropletsStaging).concat(dropletsProd); + + var index = -1; + while (true) { + index = parseInt(readlineSync.question('Choose cloudron [0-' + (droplets.length-1) + ']: ', {})); + if (isNaN(index) || index < 0 || index > droplets.length-1) console.log('Invalid selection'.red); + else break; + } + + action(droplets[index].networks.v4[0].ip_address); + }); + }); + }); +} + +function loginToCloudron(ip) { + assert.strictEqual(typeof ip, 'string'); + + console.log('Ssh into %s'.bold, ip.cyan); + + var ssh = spawn('ssh', util.format(SSH, ip, sshKeyPath).split(' ')); + ssh.on('exit', exit); + ssh.on('error', exit); + + process.stdin.setEncoding('utf8'); + process.stdin.setRawMode(true); + + process.stdin.pipe(ssh.stdin); + ssh.stdout.pipe(process.stdout); + ssh.stderr.pipe(process.stderr); + + process.stdin.resume(); + +} + +function logsFromCloudron(ip, fileName, tail) { + assert.strictEqual(typeof ip, 'string'); + assert.strictEqual(typeof fileName, 'string'); + assert.strictEqual(typeof tail, 'boolean'); + + console.log('Fetching logs from'.bold, ip.cyan); + + var options = { + hostname: ip, + port: 886, + path: util.format('/api/v1/installer/logs?filename=%s&tail=%s', fileName, tail), + method: 'GET', + key: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.key')), + cert: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.crt')), + ca: fs.readFileSync(path.join(__dirname, '../../keys/installer_ca/ca.crt')), + rejectUnauthorized: false + }; + + var req = https.request(options, function (res) { + 
res.setEncoding('utf8'); + res.on('data', function (chunk) { + process.stdout.write(chunk); + }); + }); + + req.on('error', function (error) { + exit(error); + }); + + req.end(); +} + +function triggerBackup(ip) { + assert.strictEqual(typeof ip, 'string'); + + console.log('Trigger backup on %s'.bold, ip.cyan); + + var options = { + hostname: ip, + port: 886, + path: '/api/v1/installer/backup', + method: 'POST', + key: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.key')), + cert: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.crt')), + ca: fs.readFileSync(path.join(__dirname, '../../keys/installer_ca/ca.crt')), + rejectUnauthorized: false + }; + + var req = https.request(options, function (res) { + res.setEncoding('utf8'); + res.on('data', function (chunk) { + process.stdout.write(chunk); + }); + }); + + req.on('error', function (error) { + exit(error); + }); + + req.end(); +} + +function sshExec(ip, cmds) { + var privateKey = path.join(process.env.HOME, '.ssh/id_rsa_yellowtent'); + if (!fs.existsSync(privateKey)) exit('cannot find private key'); + + var sshClient = new SshClient(); + sshClient.connect({ + host: ip, + port: 22, + username: 'root', + privateKey: fs.readFileSync(privateKey) + }); + sshClient.on('ready', function () { + console.log('connected'); + + async.eachSeries(cmds, function (cmd, iteratorDone) { + console.log(cmd.cmd.yellow); + + sshClient.exec(cmd.cmd, function(err, stream) { + if (err) exit(err.message); + + if (cmd.stdin) cmd.stdin.pipe(stream); + stream.pipe(process.stdout); + stream.on('close', function () { + iteratorDone(); + }); + }); + }, function seriesDone(error) { + if (error) exit(error.message); + + console.log('Done patching'.green); + sshClient.end(); + }); + }); + sshClient.on('error', function (error) { + exit(error.message); + }); + sshClient.on('exit', function (exitCode) { + console.log('exit'); + process.exit(exitCode); + }); +} + +function hotfixCloudron(ip, code) { + var CMDS = [ + { 
cmd: 'supervisorctl stop all' }, + { cmd: 'rm -rf /home/yellowtent/box/* /home/yellowtent/box/.*' }, + { cmd: 'tar zxf - -C /home/yellowtent/box', stdin: fs.createReadStream(code) }, + { cmd: 'cd /home/yellowtent/box && npm rebuild' }, + { cmd: 'chown -R yellowtent.yellowtent /home/yellowtent/box' }, + { cmd: 'sed -e "s/restoreUrl/_restoreUrl/" -i /home/yellowtent/setup_start.sh' }, // do not restore + { cmd: '/home/yellowtent/setup_start.sh' } // ensure db-migrate runs as well + ]; + + sshExec(ip, CMDS); +} + +function hotfix(options) { + var code; + + if (!options.code) { + var answer = readlineSync.question('Create a tarball from repo (y/n)? '); + if (answer !== 'y') return exit(); + code = os.tmpdir() + '/boxtarball.tar.gz'; + execSync(path.join(__dirname, '../images/createBoxTarball --output ' + code + ' --no-upload'), { stdio: [ null, process.stdout, process.stderr ] }); + } else { + code = options.code; + } + + if (!options.ip) { + selectCloudron(function (ip) { hotfixCloudron(ip, code); }); + } else { + hotfixCloudron(options.ip, code); + } +} + +function login(options) { + if (!options.ip) selectCloudron(loginToCloudron); + else loginToCloudron(options.ip); +} + +function logs(options) { + var fileName = '/var/log/supervisor/box.log'; + + if (options.installer) fileName = '/var/log/cloudron/installserver.log'; + if (options.nginxAccess) fileName = '/var/log/nginx/access.log'; + if (options.nginxError) fileName = '/var/log/nginx/error.log'; + + if (!options.ip) selectCloudron(function (ip) { logsFromCloudron(ip, fileName, !!options.tail); }); + else logsFromCloudron(options.ip, fileName, !!options.tail); +} + +function backup(options) { + if (!options.ip) selectCloudron(triggerBackup); + else triggerBackup(options.ip); +} + +// entry point +program.version('0.1.0'); + +program.command('login') + .description('Login to cloudron') + .option('--ip ', 'Cloudron IP') + .action(login); + +program.command('logs') + .description('Fetch logs by filename') + 
.option('--ip ', 'Cloudron IP') + .option('-f, --tail', 'tail the logs') + .option('--installer', 'installer logs') + .option('--nginx-error', 'nginx error logs') + .option('--nginx-access', 'nginx access logs') + .option('--box', 'box logs [default]') + .action(logs); + +program.command('hotfix') + .description('Hotfix a cloudron') + .option('--ip ', 'Cloudron IP') + .option('--code ', 'Code tarball') + .action(hotfix); + +program.command('backup') + .description('Backup a cloudron') + .option('--ip ', 'Cloudron IP') + .action(backup); + +program.parse(process.argv); + +if (!process.argv.slice(2).length) { + program.outputHelp(); +} else { // https://github.com/tj/commander.js/issues/338 + var knownCommand = program.commands.some(function (command) { return command._name === process.argv[2]; }); + if (!knownCommand) { + console.error('Unknown command: ' + process.argv[2]); + process.exit(1); + } +} diff --git a/admin/cloudronLogin b/admin/cloudronLogin new file mode 100755 index 000000000..4e62de8c7 --- /dev/null +++ b/admin/cloudronLogin @@ -0,0 +1,17 @@ +#!/bin/bash + +set -eu -o pipefail + +readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent" + +if [[ "$#" != "1" ]]; then + echo "Missing cloudron IP argument"; + exit 1; +fi + +if [[ ! -f "${ssh_keys}" ]]; then + echo "yellowtent ssh key is missing" + exit 1 +fi + +ssh root@$1 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" diff --git a/images/createBoxTarball b/images/createBoxTarball new file mode 100755 index 000000000..cbbfeafce --- /dev/null +++ b/images/createBoxTarball @@ -0,0 +1,118 @@ +#!/bin/bash + +set -eu + +assertNotEmpty() { + : "${!1:? "$1 is not set."}" +} + +# Only GNU getopt supports long options. 
OS X comes bundled with the BSD getopt +# brew install gnu-getopt to get the GNU getopt on OS X +[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt" +readonly GNU_GETOPT + +args=$(${GNU_GETOPT} -o "" -l "revision:,output:,publish,no-upload" -n "$0" -- "$@") +eval set -- "${args}" + +readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +readonly box_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../box" && pwd)" + +delete_bundle="yes" +commitish="HEAD" +publish="no" +upload="yes" +bundle_file="" + +while true; do + case "$1" in + --revision) commitish="$2"; shift 2;; + --output) bundle_file="$2"; delete_bundle="no"; shift 2;; + --no-upload) upload="no"; shift;; + --publish) publish="yes"; shift;; + --) break;; + *) echo "Unknown option $1"; exit 1;; + esac +done + +if [[ "${upload}" == "no" && "${publish}" == "yes" ]]; then + echo "Cannot publish without uploading" + exit 1 +fi + +readonly TMPDIR=${TMPDIR:-/tmp} # why is this not set on mint? + +assertNotEmpty AWS_DEV_ACCESS_KEY +assertNotEmpty AWS_DEV_SECRET_KEY + +if ! $(cd "${box_dir}" && git diff --exit-code >/dev/null); then + echo "You have local changes, stash or commit them to proceed" + exit 1 +fi + +version=$(cd "${box_dir}" && git rev-parse "${commitish}") +bundle_dir=$(mktemp -d -t box 2>/dev/null || mktemp -d box-XXXXXXXXXX --tmpdir=$TMPDIR) +[[ -z "$bundle_file" ]] && bundle_file="${TMPDIR}/box-${version}.tar.gz" + +chmod "o+rx,g+rx" "${bundle_dir}" # otherwise extracted tarball director won't be readable by others/group +echo "Checking out code [${version}] into ${bundle_dir}" +(cd "${box_dir}" && git archive --format=tar ${version} | (cd "${bundle_dir}" && tar xf -)) + +if diff "${TMPDIR}/boxtarball.cache/package.json.all" "${bundle_dir}/package.json" >/dev/null 2>&1; then + echo "Reusing dev modules from cache" + cp -r "${TMPDIR}/boxtarball.cache/node_modules-all/." 
"${bundle_dir}/node_modules" +else + echo "Installing modules with dev dependencies" + (cd "${bundle_dir}" && npm install) + + echo "Caching dev dependencies" + mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-all" + rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-all/" + cp "${bundle_dir}/package.json" "${TMPDIR}/boxtarball.cache/package.json.all" +fi + +echo "Building webadmin assets" +(cd "${bundle_dir}" && gulp) + +echo "Remove intermediate files required at build-time only" +rm -rf "${bundle_dir}/node_modules/" +rm -rf "${bundle_dir}/webadmin/src" +rm -rf "${bundle_dir}/gulpfile.js" + +if diff "${TMPDIR}/boxtarball.cache/package.json.prod" "${bundle_dir}/package.json" >/dev/null 2>&1; then + echo "Reusing prod modules from cache" + cp -r "${TMPDIR}/boxtarball.cache/node_modules-prod/." "${bundle_dir}/node_modules" +else + echo "Installing modules for production" + (cd "${bundle_dir}" && npm install --production) + + echo "Caching prod dependencies" + mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-prod" + rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-prod/" + cp "${bundle_dir}/package.json" "${TMPDIR}/boxtarball.cache/package.json.prod" +fi + +echo "Create final tarball" +(cd "${bundle_dir}" && tar czf "${bundle_file}" .) 
+echo "Cleaning up ${bundle_dir}" +rm -rf "${bundle_dir}" + +if [[ "${upload}" == "yes" ]]; then + echo "Uploading bundle to S3" + # That special header is needed to allow access with singed urls created with different aws credentials than the ones the file got uploaded + s3cmd --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" + + versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz" + echo "The URL for the versions file is: ${versions_file_url}" + + if [[ "${publish}" == "yes" ]]; then + echo "Publishing to dev" + ${script_dir}/release/release create --env dev --code "${versions_file_url}" + fi +fi + +if [[ "${delete_bundle}" == "no" ]]; then + echo "Tarball preserved at ${bundle_file}" +else + rm "${bundle_file}" +fi + diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh new file mode 100755 index 000000000..5755a4c70 --- /dev/null +++ b/images/createDigitalOceanImage.sh @@ -0,0 +1,196 @@ +#!/bin/bash + +set -eu -o pipefail + +assertNotEmpty() { + : "${!1:? "$1 is not set."}" +} + +readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +readonly INSTALLER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)" +readonly JSON="${INSTALLER_DIR}/node_modules/.bin/json" +readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent" + +installer_revision=$(git rev-parse HEAD) +box_size="512mb" +image_regions=(sfo1 ams3) +box_name="" +droplet_id="" +droplet_ip="" +destroy_droplet="yes" +deploy_env="dev" + +# Only GNU getopt supports long options. 
OS X comes bundled with the BSD getopt +# brew install gnu-getopt to get the GNU getopt on OS X +[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt" +readonly GNU_GETOPT + +args=$(${GNU_GETOPT} -o "" -l "revision:,regions:,size:,box:,no-destroy,env:" -n "$0" -- "$@") +eval set -- "${args}" + +while true; do + case "$1" in + --env) deploy_env="$2"; shift 2;; + --revision) installer_revision="$2"; shift 2;; + --regions) image_regions=("$2"); shift 2;; # parse as whitespace separated array + --size) box_size="$2"; shift 2;; + --box) box_name="$2"; destroy_droplet="no"; shift 2;; + --no-destroy) destroy_droplet="no"; shift 2;; + --) break;; + *) echo "Unknown option $1"; exit 1;; + esac +done + +# set DO token, picked up by digitalOceanFunctions.sh +if [[ "${deploy_env}" == "staging" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}" +elif [[ "${deploy_env}" == "dev" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}" +elif [[ "${deploy_env}" == "prod" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}" +else + echo "No such env ${deploy_env}." + exit 1 +fi +source "${SCRIPT_DIR}/digitalOceanFunctions.sh" + +if [[ ! 
-f "${ssh_keys}" ]]; then + echo "yellowtent ssh key is missing" + exit 1 +fi + +function get_pretty_revision() { + local git_rev="$1" + local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null) + local name=$(git name-rev --name-only --tags "${sha1}" 2>/dev/null) + + if [[ -z "${name}" ]]; then + echo "Unable to resolve $1" + exit 1 + fi + + # fallback to sha1 if we cannot find a tag + if [[ "${name}" == "undefined" ]]; then + echo "${sha1}" + else + echo "${name}" + fi +} + +now=$(date "+%Y-%m-%d-%H%M%S") +pretty_revision=$(get_pretty_revision "${installer_revision}") + +if [[ -z "${box_name}" ]]; then + # if you change this, change the regexp is appstore/janitor.js + box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes + + # create a new droplet if no name given + yellowtent_ssh_key_id=$(get_ssh_key_id "yellowtent") + if [[ -z "${yellowtent_ssh_key_id}" ]]; then + echo "Could not query yellowtent ssh key" + exit 1 + fi + echo "Detected yellowtent ssh key id: ${yellowtent_ssh_key_id}" # 124654 for yellowtent key + + echo "Creating Droplet with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" + droplet_id=$(create_droplet ${yellowtent_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) + if [[ -z "${droplet_id}" ]]; then + echo "Failed to create droplet" + exit 1 + fi + echo "Created droplet with id: ${droplet_id}" + + # If we run scripts overenthusiastically without the wait, setup script randomly fails + echo -n "Waiting 120 seconds for droplet creation" + for i in $(seq 1 24); do + echo -n "." 
+ sleep 5 + done + echo "" +else + droplet_id=$(get_droplet_id "${box_name}") + echo "Reusing droplet with id: ${droplet_id}" + + power_on_droplet "${droplet_id}" +fi + +# Query DO until we get an IP +while true; do + echo "Trying to get the droplet IP" + droplet_ip=$(get_droplet_ip "${droplet_id}") + if [[ "${droplet_ip}" != "" ]]; then + echo "Droplet IP : [${droplet_ip}]" + break + fi + echo "Timedout, trying again in 10 seconds" + sleep 10 +done + +while true; do + echo "Trying to copy init script to droplet" + if scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${droplet_ip}:.; then + break + fi + echo "Timedout, trying again in 30 seconds" + sleep 30 +done + +echo "Copying installer source" +cd "${INSTALLER_DIR}" +git archive --format=tar HEAD | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "cat - > /root/installer.tar" + +echo "Executing init script" +if ! 
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then + echo "Init script failed" + exit 1 +fi + +echo "Copy over certs" +scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" + +echo "Shutting down droplet with id : ${droplet_id}" +ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail + +# wait 10 secs for actual shutdown +echo "Waiting for 10 seconds for droplet to shutdown" +sleep 30 + +echo "Powering off droplet" +power_off_droplet "${droplet_id}" + +snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" +echo "Snapshotting as ${snapshot_name}" +snapshot_droplet "${droplet_id}" "${snapshot_name}" + +image_id=$(get_image_id "${snapshot_name}") +echo "Image id is ${image_id}" + +if [[ "${destroy_droplet}" == "yes" ]]; then + echo "Destroying droplet" + destroy_droplet "${droplet_id}" +else + echo "Skipping droplet destroy" +fi + +echo "Transferring image to other regions" +xfer_events=() +# skip the first region, as the image was created there +for image_region in ${image_regions[@]:1}; do + xfer_event=$(transfer_image ${image_id} ${image_region}) + echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}" + xfer_events+=("${xfer_event}") + sleep 1 +done + +echo "Image transfer initiated, but they will take some time to get transferred." + +for xfer_event in ${xfer_events[@]}; do + wait_for_image_event "${image_id}" "${xfer_event}" +done + +echo "Done." 
+ diff --git a/images/digitalOceanFunctions.sh b/images/digitalOceanFunctions.sh new file mode 100644 index 000000000..447973373 --- /dev/null +++ b/images/digitalOceanFunctions.sh @@ -0,0 +1,180 @@ +#!/bin/bash + +if [[ -z "${DIGITAL_OCEAN_TOKEN}" ]]; then + echo "Script requires DIGITAL_OCEAN_TOKEN env to be set" + exit 1 +fi + +if [[ -z "${JSON}" ]]; then + echo "Script requires JSON env to be set to path of JSON binary" + exit 1 +fi + +readonly CURL="curl -s -u ${DIGITAL_OCEAN_TOKEN}:" + +function get_ssh_key_id() { + $CURL "https://api.digitalocean.com/v2/account/keys" \ + | $JSON ssh_keys \ + | $JSON -c "this.name === \"$1\"" \ + | $JSON 0.id +} + +function create_droplet() { + local ssh_key_id="$1" + local box_name="$2" + local box_size="$3" + local image_region="$4" + + local ubuntu_image_slug="ubuntu-14-10-x64" + + local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" + + $CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id +} + +function get_droplet_ip() { + local droplet_id="$1" + $CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address" +} + +function get_droplet_id() { + local droplet_name="$1" + $CURL "https://api.digitalocean.com/v2/droplets?per_page=100" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id" +} + +function power_off_droplet() { + local droplet_id="$1" + local data='{"type":"power_off"}' + local response=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions") + local event_id=`echo "${response}" | $JSON action.id` + + if [[ -z "${event_id}" ]]; then + echo "Got no event id, assuming already powered off." + echo "Response: ${response}" + return + fi + + echo "Powered off droplet. 
Event id: ${event_id}" + echo -n "Waiting for droplet to power off" + + while true; do + local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` + if [[ "${event_status}" == "completed" ]]; then + break + fi + echo -n "." + sleep 10 + done + echo "" +} + +function power_on_droplet() { + local droplet_id="$1" + local data='{"type":"power_on"}' + local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id` + + echo "Powered on droplet. Event id: ${event_id}" + + if [[ -z "${event_id}" ]]; then + echo "Got no event id, assuming already powered on" + return + fi + + echo -n "Waiting for droplet to power on" + + while true; do + local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` + if [[ "${event_status}" == "completed" ]]; then + break + fi + echo -n "." + sleep 10 + done + echo "" +} + +function snapshot_droplet() { + local droplet_id="$1" + local snapshot_name="$2" + local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}" + local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id` + + echo "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}" + echo -n "Waiting for snapshot to complete" + + while true; do + local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` + if [[ "${event_status}" == "completed" ]]; then + break + fi + echo -n "." 
+ sleep 10 + done + echo "" +} + +function destroy_droplet() { + local droplet_id="$1" + # TODO: check for 204 status + $CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}" + echo "Droplet destroyed" + echo "" +} + +function transfer_image() { + local image_id="$1" + local region_slug="$2" + local data="{\"type\":\"transfer\",\"region\":\"${region_slug}\"}" + local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/images/${image_id}/actions" | $JSON action.id` + echo "${event_id}" +} + +function get_image_id() { + local snapshot_name="$1" + local image_id="" + + image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \ + | $JSON images \ + | $JSON -c "this.name === \"${snapshot_name}\"" 0.id) + + if [[ -n "${image_id}" ]]; then + echo "${image_id}" + fi +} + +function get_image_id_by_revision() { + local revision="$1" + local image_id="" + + image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \ + | $JSON images \ + | $JSON -c "this.name.indexOf(\"box-${revision}\") === 0" 0.id) + + if [[ -n "${image_id}" ]]; then + echo "${image_id}" + fi +} + +function get_image_name() { + local image_id="$1" + $CURL "https://api.digitalocean.com/v2/images/${image_id}?per_page=100" \ + | $JSON image.name +} + +function wait_for_image_event() { + local image_id="$1" + local event_id="$2" + + echo -n "Waiting for ${event_id}" + + while true; do + local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status` + if [[ "${event_status}" == "completed" ]]; then + break + fi + echo -n "." 
+ sleep 10 + done + echo "" +} + diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh new file mode 100755 index 000000000..87c2f881d --- /dev/null +++ b/images/initializeBaseUbuntuImage.sh @@ -0,0 +1,229 @@ +#!/bin/bash + +set -euv -o pipefail + +readonly USER=yellowtent +readonly USER_HOME="/home/${USER}" +readonly DATA_DIR="${USER_HOME}/data" +readonly APPDATA="${DATA_DIR}/appdata" +readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer" +readonly INSTALLER_REVISION="$1" +readonly DOCKER_DATA_FILE="/root/docker_data.img" +readonly USER_HOME_FILE="/root/user_home.img" + +echo "==== Create User ${USER} ====" +if ! id "${USER}"; then + useradd "${USER}" -m +fi + +echo "=== Yellowtent base image preparation (installer revision - ${INSTALLER_REVISION}) ===" + +export DEBIAN_FRONTEND=noninteractive + +# Allocate two sets of swap files - one for general app usage and another for backup +# The backup swap is setup for swap on the fly by the backup scripts +echo "=== Setup swap file ===" +apps_swap_file="/apps.swap" +[[ -f "${apps_swap_file}" ]] && swapoff "${apps_swap_file}" +fallocate -l 1024m "${apps_swap_file}" +chmod 600 "${apps_swap_file}" +mkswap "${apps_swap_file}" +swapon "${apps_swap_file}" +echo "${apps_swap_file} none swap sw 0 0" >> /etc/fstab + +backup_swap_file="/backup.swap" +[[ -f "${backup_swap_file}" ]] && swapoff "${backup_swap_file}" +fallocate -l 1024m "${backup_swap_file}" +chmod 600 "${backup_swap_file}" +mkswap "${backup_swap_file}" + +echo "==== Install project dependencies ====" +apt-get update + +echo "=== Upgrade ===" +apt-get upgrade -y + +# Setup firewall before everything. 
Atleast docker 1.5 creates it's own chain and the -X below will remove it +# Do NOT use iptables-persistent because it's startup ordering conflicts with docker +echo "=== Setting up firewall ===" +# clear tables and set default policy +iptables -F # flush all chains +iptables -X # delete all chains +# default policy for filter table +iptables -P INPUT DROP +iptables -P FORWARD ACCEPT # TODO: disable icc and make this as reject +iptables -P OUTPUT ACCEPT + +# NOTE: keep these in sync with src/apps.js validatePortBindings +# allow ssh, http, https, ping, dns +iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT +iptables -A INPUT -p tcp --dport 22 -j ACCEPT +iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,443,886 -j ACCEPT +iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT +iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT +iptables -A INPUT -p udp --sport 53 -j ACCEPT +iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP: + +# loopback +iptables -A INPUT -i lo -j ACCEPT +iptables -A OUTPUT -o lo -j ACCEPT + +# disable metadata access to non-root +# modprobe ipt_owner +iptables -A OUTPUT -m owner ! --uid-owner root -d 169.254.169.254 -j DROP + +# prevent DoS +# iptables -A INPUT -p tcp --dport 80 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT + +# log dropped incoming. 
keep this at the end of all the rules +iptables -N LOGGING # new chain +iptables -A INPUT -j LOGGING # last rule in INPUT chain +iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7 +iptables -A LOGGING -j DROP + +echo "==== Install docker ====" +# see http://idolstarastronomer.com/painless-docker.html +echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list +apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +apt-get update +apt-get -y install lxc-docker-1.5.0 +ln -sf /usr/bin/docker.io /usr/local/bin/docker + +if [ ! -f "${DOCKER_DATA_FILE}" ]; then + service docker stop + if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r); then + umount -l "${aufs_mounts}" + fi + rm -rf /var/lib/docker + mkdir /var/lib/docker + + # create a separate 12GB fs for docker images + # dd if=/dev/zero of=/root/docker_data.img bs=1M count=12000 + apt-get -y install btrfs-tools + truncate -s 12G "${DOCKER_DATA_FILE}" + mkfs.btrfs -L DockerData "${DOCKER_DATA_FILE}" + echo "${DOCKER_DATA_FILE} /var/lib/docker btrfs loop,nosuid 0 0" >> /etc/fstab + echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker + mount "${DOCKER_DATA_FILE}" + + service docker start + # give docker sometime to start up and create iptables rules + sleep 10 +fi + +# ubuntu will restore iptables from this file automatically. 
this is here so that docker's chain is saved to this file +mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 + +# now add the user to the docker group +usermod "${USER}" -a -G docker +echo "=== Pulling base docker images ===" +docker pull cloudron/base:0.3.0 + +echo "=== Pulling mysql addon image ===" +docker pull cloudron/mysql:0.3.0 + +echo "=== Pulling postgresql addon image ===" +docker pull cloudron/postgresql:0.3.0 + +echo "=== Pulling redis addon image ===" +docker pull cloudron/redis:0.3.0 + +echo "=== Pulling mongodb addon image ===" +docker pull cloudron/mongodb:0.3.0 + +echo "=== Pulling graphite docker images ===" +docker pull cloudron/graphite:0.3.1 + +echo "=== Pulling mail relay ===" +docker pull cloudron/mail:0.3.0 + +echo "==== Install nginx ====" +apt-get -y install nginx-full + +echo "==== Install build-essential ====" +apt-get -y install build-essential rcconf + + +echo "==== Install mysql ====" +debconf-set-selections <<< 'mysql-server mysql-server/root_password password password' +debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password' +apt-get -y install mysql-server + +echo "==== Install pwgen ====" +apt-get -y install pwgen + +echo "==== Install supervisor ====" +apt-get -y install supervisor + +echo "==== Install collectd ===" +apt-get install -y collectd collectd-utils +update-rc.d -f collectd remove + +echo "==== Seting up btrfs user home ===" +if [[ ! 
-f "${USER_HOME_FILE}" ]]; then + # create a separate 12GB fs for data + truncate -s 12G "${USER_HOME_FILE}" + mkfs.btrfs -L UserHome "${USER_HOME_FILE}" + echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab + mount "${USER_HOME_FILE}" + btrfs subvolume create "${USER_HOME}/data" +fi + +echo "=== Install tmpreaper ===" +sudo apt-get install -y tmpreaper +sudo sed -e 's/SHOWWARNING=true/# SHOWWARNING=true/' -i /etc/tmpreaper.conf + +echo "==== Extracting installer source ====" +rm -rf "${INSTALLER_SOURCE_DIR}" && mkdir -p "${INSTALLER_SOURCE_DIR}" +tar xvf /root/installer.tar -C "${INSTALLER_SOURCE_DIR}" && rm /root/installer.tar +echo "${INSTALLER_REVISION}" > "${INSTALLER_SOURCE_DIR}/REVISION" + +echo "==== Install nodejs ====" +apt-get install -y curl +curl -sL https://deb.nodesource.com/setup_0.12 | bash - +apt-get install -y nodejs + +echo "=== Rebuilding npm packages ===" +cd "${INSTALLER_SOURCE_DIR}" && npm install --production + +echo "==== Make the user own his home ====" +chown "${USER}:${USER}" -R "/home/${USER}" + +echo "==== Install init script ====" +cat > /etc/init.d/cloudron-bootstrap <&2 + exit 3 + ;; +esac +EOF + +chmod +x /etc/init.d/cloudron-bootstrap +update-rc.d cloudron-bootstrap defaults 99 + +sync + diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json new file mode 100644 index 000000000..aed99cbf2 --- /dev/null +++ b/npm-shrinkwrap.json @@ -0,0 +1,923 @@ +{ + "name": "installer", + "version": "0.0.1", + "dependencies": { + "async": { + "version": "0.9.0", + "from": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.0.tgz" + }, + "body-parser": { + "version": "1.12.2", + "from": "https://registry.npmjs.org/body-parser/-/body-parser-1.12.2.tgz", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.12.2.tgz", + "dependencies": { + "bytes": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", + "resolved": 
"http://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz" + }, + "content-type": { + "version": "1.0.1", + "from": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz" + }, + "depd": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + }, + "iconv-lite": { + "version": "0.4.7", + "from": "http://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.7.tgz", + "resolved": "http://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.7.tgz" + }, + "on-finished": { + "version": "2.2.0", + "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "dependencies": { + "ee-first": { + "version": "1.1.0", + "from": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", + "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + } + } + }, + "qs": { + "version": "2.4.1", + "from": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz", + "resolved": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz" + }, + "raw-body": { + "version": "1.3.3", + "from": "http://registry.npmjs.org/raw-body/-/raw-body-1.3.3.tgz", + "resolved": "http://registry.npmjs.org/raw-body/-/raw-body-1.3.3.tgz" + }, + "type-is": { + "version": "1.6.1", + "from": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "dependencies": { + "media-typer": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" + }, + "mime-types": { + "version": "2.0.10", + "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "dependencies": { + "mime-db": { + 
"version": "1.8.0", + "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + } + } + } + } + } + } + }, + "connect-lastmile": { + "version": "0.0.10", + "from": "http://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.10.tgz", + "resolved": "http://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.10.tgz" + }, + "debug": { + "version": "2.1.3", + "from": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz", + "dependencies": { + "ms": { + "version": "0.7.0", + "from": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz", + "resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz" + } + } + }, + "express": { + "version": "4.12.3", + "from": "https://registry.npmjs.org/express/-/express-4.12.3.tgz", + "resolved": "https://registry.npmjs.org/express/-/express-4.12.3.tgz", + "dependencies": { + "accepts": { + "version": "1.2.5", + "from": "https://registry.npmjs.org/accepts/-/accepts-1.2.5.tgz", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.2.5.tgz", + "dependencies": { + "mime-types": { + "version": "2.0.10", + "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "dependencies": { + "mime-db": { + "version": "1.8.0", + "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + } + } + }, + "negotiator": { + "version": "0.5.1", + "from": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.1.tgz", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.1.tgz" + } + } + }, + "content-disposition": { + "version": "0.5.0", + "from": "http://registry.npmjs.org/content-disposition/-/content-disposition-0.5.0.tgz", + "resolved": 
"http://registry.npmjs.org/content-disposition/-/content-disposition-0.5.0.tgz" + }, + "content-type": { + "version": "1.0.1", + "from": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz" + }, + "cookie": { + "version": "0.1.2", + "from": "https://registry.npmjs.org/cookie/-/cookie-0.1.2.tgz", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.1.2.tgz" + }, + "cookie-signature": { + "version": "1.0.6", + "from": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz" + }, + "depd": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + }, + "escape-html": { + "version": "1.0.1", + "from": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.1.tgz", + "resolved": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.1.tgz" + }, + "etag": { + "version": "1.5.1", + "from": "http://registry.npmjs.org/etag/-/etag-1.5.1.tgz", + "resolved": "http://registry.npmjs.org/etag/-/etag-1.5.1.tgz", + "dependencies": { + "crc": { + "version": "3.2.1", + "from": "http://registry.npmjs.org/crc/-/crc-3.2.1.tgz", + "resolved": "http://registry.npmjs.org/crc/-/crc-3.2.1.tgz" + } + } + }, + "finalhandler": { + "version": "0.3.4", + "from": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.3.4.tgz", + "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.3.4.tgz" + }, + "fresh": { + "version": "0.2.4", + "from": "http://registry.npmjs.org/fresh/-/fresh-0.2.4.tgz", + "resolved": "http://registry.npmjs.org/fresh/-/fresh-0.2.4.tgz" + }, + "merge-descriptors": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz" 
+ }, + "methods": { + "version": "1.1.1", + "from": "https://registry.npmjs.org/methods/-/methods-1.1.1.tgz", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.1.tgz" + }, + "on-finished": { + "version": "2.2.0", + "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "dependencies": { + "ee-first": { + "version": "1.1.0", + "from": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", + "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + } + } + }, + "parseurl": { + "version": "1.3.0", + "from": "http://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz", + "resolved": "http://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz" + }, + "path-to-regexp": { + "version": "0.1.3", + "from": "http://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.3.tgz", + "resolved": "http://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.3.tgz" + }, + "proxy-addr": { + "version": "1.0.7", + "from": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.7.tgz", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.7.tgz", + "dependencies": { + "forwarded": { + "version": "0.1.0", + "from": "http://registry.npmjs.org/forwarded/-/forwarded-0.1.0.tgz", + "resolved": "http://registry.npmjs.org/forwarded/-/forwarded-0.1.0.tgz" + }, + "ipaddr.js": { + "version": "0.1.9", + "from": "http://registry.npmjs.org/ipaddr.js/-/ipaddr.js-0.1.9.tgz", + "resolved": "http://registry.npmjs.org/ipaddr.js/-/ipaddr.js-0.1.9.tgz" + } + } + }, + "qs": { + "version": "2.4.1", + "from": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz", + "resolved": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz" + }, + "range-parser": { + "version": "1.0.2", + "from": "http://registry.npmjs.org/range-parser/-/range-parser-1.0.2.tgz", + "resolved": "http://registry.npmjs.org/range-parser/-/range-parser-1.0.2.tgz" + }, + "send": { + "version": "0.12.2", + "from": 
"https://registry.npmjs.org/send/-/send-0.12.2.tgz", + "resolved": "https://registry.npmjs.org/send/-/send-0.12.2.tgz", + "dependencies": { + "destroy": { + "version": "1.0.3", + "from": "http://registry.npmjs.org/destroy/-/destroy-1.0.3.tgz", + "resolved": "http://registry.npmjs.org/destroy/-/destroy-1.0.3.tgz" + }, + "mime": { + "version": "1.3.4", + "from": "https://registry.npmjs.org/mime/-/mime-1.3.4.tgz", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.3.4.tgz" + }, + "ms": { + "version": "0.7.0", + "from": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz", + "resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz" + } + } + }, + "serve-static": { + "version": "1.9.2", + "from": "https://registry.npmjs.org/serve-static/-/serve-static-1.9.2.tgz", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.9.2.tgz" + }, + "type-is": { + "version": "1.6.1", + "from": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "dependencies": { + "media-typer": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" + }, + "mime-types": { + "version": "2.0.10", + "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "dependencies": { + "mime-db": { + "version": "1.8.0", + "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + } + } + } + } + }, + "vary": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/vary/-/vary-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/vary/-/vary-1.0.0.tgz" + }, + "utils-merge": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/utils-merge/-/utils-merge-1.0.0.tgz", + "resolved": 
"http://registry.npmjs.org/utils-merge/-/utils-merge-1.0.0.tgz" + } + } + }, + "forever": { + "version": "0.14.1", + "from": "http://registry.npmjs.org/forever/-/forever-0.14.1.tgz", + "resolved": "http://registry.npmjs.org/forever/-/forever-0.14.1.tgz", + "dependencies": { + "colors": { + "version": "0.6.2", + "from": "https://registry.npmjs.org/colors/-/colors-0.6.2.tgz", + "resolved": "https://registry.npmjs.org/colors/-/colors-0.6.2.tgz" + }, + "cliff": { + "version": "0.1.10", + "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.10.tgz", + "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.10.tgz", + "dependencies": { + "colors": { + "version": "1.0.3", + "from": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz" + }, + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + } + } + }, + "flatiron": { + "version": "0.4.3", + "from": "http://registry.npmjs.org/flatiron/-/flatiron-0.4.3.tgz", + "resolved": "http://registry.npmjs.org/flatiron/-/flatiron-0.4.3.tgz", + "dependencies": { + "broadway": { + "version": "0.3.6", + "from": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", + "resolved": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", + "dependencies": { + "cliff": { + "version": "0.1.9", + "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", + "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", + "dependencies": { + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + } + } + }, + "eventemitter2": { + "version": "0.4.14", + "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" + }, + "winston": { + "version": "0.8.0", + 
"from": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", + "resolved": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", + "dependencies": { + "async": { + "version": "0.2.10", + "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" + }, + "cycle": { + "version": "1.0.3", + "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", + "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" + }, + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + }, + "pkginfo": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", + "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" + }, + "stack-trace": { + "version": "0.0.9", + "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", + "resolved": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" + } + } + } + } + }, + "optimist": { + "version": "0.6.0", + "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", + "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", + "dependencies": { + "wordwrap": { + "version": "0.0.2", + "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" + }, + "minimist": { + "version": "0.0.10", + "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" + } + } + }, + "prompt": { + "version": "0.2.14", + "from": "http://registry.npmjs.org/prompt/-/prompt-0.2.14.tgz", + "resolved": "http://registry.npmjs.org/prompt/-/prompt-0.2.14.tgz", + "dependencies": { + "pkginfo": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", + "resolved": 
"http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" + }, + "read": { + "version": "1.0.5", + "from": "http://registry.npmjs.org/read/-/read-1.0.5.tgz", + "resolved": "http://registry.npmjs.org/read/-/read-1.0.5.tgz", + "dependencies": { + "mute-stream": { + "version": "0.0.4", + "from": "http://registry.npmjs.org/mute-stream/-/mute-stream-0.0.4.tgz", + "resolved": "http://registry.npmjs.org/mute-stream/-/mute-stream-0.0.4.tgz" + } + } + }, + "revalidator": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/revalidator/-/revalidator-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/revalidator/-/revalidator-0.1.8.tgz" + } + } + }, + "director": { + "version": "1.2.7", + "from": "http://registry.npmjs.org/director/-/director-1.2.7.tgz", + "resolved": "http://registry.npmjs.org/director/-/director-1.2.7.tgz" + } + } + }, + "forever-monitor": { + "version": "1.5.2", + "from": "http://registry.npmjs.org/forever-monitor/-/forever-monitor-1.5.2.tgz", + "resolved": "http://registry.npmjs.org/forever-monitor/-/forever-monitor-1.5.2.tgz", + "dependencies": { + "broadway": { + "version": "0.3.6", + "from": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", + "resolved": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", + "dependencies": { + "cliff": { + "version": "0.1.9", + "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", + "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", + "dependencies": { + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + } + } + }, + "eventemitter2": { + "version": "0.4.14", + "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" + }, + "winston": { + "version": "0.8.0", + "from": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", + "resolved": 
"http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", + "dependencies": { + "async": { + "version": "0.2.10", + "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" + }, + "cycle": { + "version": "1.0.3", + "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", + "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" + }, + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + }, + "pkginfo": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", + "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" + }, + "stack-trace": { + "version": "0.0.9", + "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", + "resolved": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" + } + } + } + } + }, + "minimatch": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", + "dependencies": { + "lru-cache": { + "version": "2.5.0", + "from": "http://registry.npmjs.org/lru-cache/-/lru-cache-2.5.0.tgz", + "resolved": "http://registry.npmjs.org/lru-cache/-/lru-cache-2.5.0.tgz" + }, + "sigmund": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz" + } + } + }, + "ps-tree": { + "version": "0.0.3", + "from": "http://registry.npmjs.org/ps-tree/-/ps-tree-0.0.3.tgz", + "resolved": "http://registry.npmjs.org/ps-tree/-/ps-tree-0.0.3.tgz", + "dependencies": { + "event-stream": { + "version": "0.5.3", + "from": "http://registry.npmjs.org/event-stream/-/event-stream-0.5.3.tgz", + "resolved": "http://registry.npmjs.org/event-stream/-/event-stream-0.5.3.tgz", + "dependencies": { + "optimist": { + 
"version": "0.2.8", + "from": "http://registry.npmjs.org/optimist/-/optimist-0.2.8.tgz", + "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.2.8.tgz", + "dependencies": { + "wordwrap": { + "version": "0.0.2", + "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" + } + } + } + } + } + } + }, + "watch": { + "version": "0.13.0", + "from": "http://registry.npmjs.org/watch/-/watch-0.13.0.tgz", + "resolved": "http://registry.npmjs.org/watch/-/watch-0.13.0.tgz", + "dependencies": { + "minimist": { + "version": "1.1.1", + "from": "http://registry.npmjs.org/minimist/-/minimist-1.1.1.tgz", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.1.1.tgz" + } + } + } + } + }, + "nconf": { + "version": "0.6.9", + "from": "http://registry.npmjs.org/nconf/-/nconf-0.6.9.tgz", + "resolved": "http://registry.npmjs.org/nconf/-/nconf-0.6.9.tgz", + "dependencies": { + "async": { + "version": "0.2.9", + "from": "http://registry.npmjs.org/async/-/async-0.2.9.tgz", + "resolved": "http://registry.npmjs.org/async/-/async-0.2.9.tgz" + }, + "ini": { + "version": "1.3.3", + "from": "http://registry.npmjs.org/ini/-/ini-1.3.3.tgz", + "resolved": "http://registry.npmjs.org/ini/-/ini-1.3.3.tgz" + }, + "optimist": { + "version": "0.6.0", + "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", + "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", + "dependencies": { + "wordwrap": { + "version": "0.0.2", + "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" + }, + "minimist": { + "version": "0.0.10", + "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" + } + } + } + } + }, + "nssocket": { + "version": "0.5.3", + "from": "http://registry.npmjs.org/nssocket/-/nssocket-0.5.3.tgz", + "resolved": 
"http://registry.npmjs.org/nssocket/-/nssocket-0.5.3.tgz", + "dependencies": { + "eventemitter2": { + "version": "0.4.14", + "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" + }, + "lazy": { + "version": "1.0.11", + "from": "http://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz", + "resolved": "http://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz" + } + } + }, + "optimist": { + "version": "0.6.1", + "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", + "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", + "dependencies": { + "wordwrap": { + "version": "0.0.2", + "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" + }, + "minimist": { + "version": "0.0.10", + "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" + } + } + }, + "timespan": { + "version": "2.3.0", + "from": "http://registry.npmjs.org/timespan/-/timespan-2.3.0.tgz", + "resolved": "http://registry.npmjs.org/timespan/-/timespan-2.3.0.tgz" + }, + "utile": { + "version": "0.2.1", + "from": "http://registry.npmjs.org/utile/-/utile-0.2.1.tgz", + "resolved": "http://registry.npmjs.org/utile/-/utile-0.2.1.tgz", + "dependencies": { + "async": { + "version": "0.2.10", + "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" + }, + "deep-equal": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/deep-equal/-/deep-equal-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/deep-equal/-/deep-equal-1.0.0.tgz" + }, + "i": { + "version": "0.3.3", + "from": "http://registry.npmjs.org/i/-/i-0.3.3.tgz", + "resolved": "http://registry.npmjs.org/i/-/i-0.3.3.tgz" + }, + "mkdirp": { + "version": "0.5.0", + "from": 
"http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "dependencies": { + "minimist": { + "version": "0.0.8", + "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz" + } + } + }, + "ncp": { + "version": "0.4.2", + "from": "http://registry.npmjs.org/ncp/-/ncp-0.4.2.tgz", + "resolved": "http://registry.npmjs.org/ncp/-/ncp-0.4.2.tgz" + }, + "rimraf": { + "version": "2.3.2", + "from": "https://registry.npmjs.org/rimraf/-/rimraf-2.3.2.tgz", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.3.2.tgz", + "dependencies": { + "glob": { + "version": "4.5.3", + "from": "http://registry.npmjs.org/glob/-/glob-4.5.3.tgz", + "resolved": "http://registry.npmjs.org/glob/-/glob-4.5.3.tgz", + "dependencies": { + "inflight": { + "version": "1.0.4", + "from": "http://registry.npmjs.org/inflight/-/inflight-1.0.4.tgz", + "resolved": "http://registry.npmjs.org/inflight/-/inflight-1.0.4.tgz", + "dependencies": { + "wrappy": { + "version": "1.0.1", + "from": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz", + "resolved": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz" + } + } + }, + "inherits": { + "version": "2.0.1", + "from": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "resolved": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + }, + "minimatch": { + "version": "2.0.4", + "from": "http://registry.npmjs.org/minimatch/-/minimatch-2.0.4.tgz", + "resolved": "http://registry.npmjs.org/minimatch/-/minimatch-2.0.4.tgz", + "dependencies": { + "brace-expansion": { + "version": "1.1.0", + "from": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.0.tgz", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.0.tgz", + "dependencies": { + "balanced-match": { + "version": "0.2.0", + "from": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-0.2.0.tgz", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-0.2.0.tgz" + }, + "concat-map": { + "version": "0.0.1", + "from": "http://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "resolved": "http://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz" + } + } + } + } + }, + "once": { + "version": "1.3.1", + "from": "http://registry.npmjs.org/once/-/once-1.3.1.tgz", + "resolved": "http://registry.npmjs.org/once/-/once-1.3.1.tgz", + "dependencies": { + "wrappy": { + "version": "1.0.1", + "from": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz", + "resolved": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz" + } + } + } + } + } + } + } + } + }, + "winston": { + "version": "0.8.3", + "from": "http://registry.npmjs.org/winston/-/winston-0.8.3.tgz", + "resolved": "http://registry.npmjs.org/winston/-/winston-0.8.3.tgz", + "dependencies": { + "async": { + "version": "0.2.10", + "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" + }, + "cycle": { + "version": "1.0.3", + "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", + "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" + }, + "eyes": { + "version": "0.1.8", + "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" + }, + "isstream": { + "version": "0.1.2", + "from": "http://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "resolved": "http://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz" + }, + "pkginfo": { + "version": "0.3.0", + "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", + "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" + }, + "stack-trace": { + "version": "0.0.9", + "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", + "resolved": 
"http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" + } + } + } + } + }, + "json": { + "version": "9.0.3", + "from": "https://registry.npmjs.org/json/-/json-9.0.3.tgz", + "resolved": "https://registry.npmjs.org/json/-/json-9.0.3.tgz" + }, + "morgan": { + "version": "1.5.2", + "from": "https://registry.npmjs.org/morgan/-/morgan-1.5.2.tgz", + "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.5.2.tgz", + "dependencies": { + "basic-auth": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.0.0.tgz" + }, + "depd": { + "version": "1.0.0", + "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", + "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + }, + "on-finished": { + "version": "2.2.0", + "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "dependencies": { + "ee-first": { + "version": "1.1.0", + "from": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", + "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + } + } + } + } + }, + "proxy-middleware": { + "version": "0.11.0", + "from": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.11.0.tgz", + "resolved": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.11.0.tgz" + }, + "safetydance": { + "version": "0.0.16", + "from": "http://registry.npmjs.org/safetydance/-/safetydance-0.0.16.tgz", + "resolved": "http://registry.npmjs.org/safetydance/-/safetydance-0.0.16.tgz" + }, + "superagent": { + "version": "0.21.0", + "from": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", + "resolved": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", + "dependencies": { + "qs": { + "version": "1.2.0", + "from": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz", + "resolved": 
"https://registry.npmjs.org/qs/-/qs-1.2.0.tgz" + }, + "formidable": { + "version": "1.0.14", + "from": "http://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz", + "resolved": "http://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz" + }, + "mime": { + "version": "1.2.11", + "from": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz" + }, + "component-emitter": { + "version": "1.1.2", + "from": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz", + "resolved": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz" + }, + "methods": { + "version": "1.0.1", + "from": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz" + }, + "cookiejar": { + "version": "2.0.1", + "from": "http://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz", + "resolved": "http://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz" + }, + "reduce-component": { + "version": "1.0.1", + "from": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz", + "resolved": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz" + }, + "extend": { + "version": "1.2.1", + "from": "https://registry.npmjs.org/extend/-/extend-1.2.1.tgz", + "resolved": "https://registry.npmjs.org/extend/-/extend-1.2.1.tgz" + }, + "form-data": { + "version": "0.1.3", + "from": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz", + "resolved": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz", + "dependencies": { + "combined-stream": { + "version": "0.0.7", + "from": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", + "dependencies": { + "delayed-stream": { + "version": "0.0.5", + "from": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz", + "resolved": 
"http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz" + } + } + } + } + }, + "readable-stream": { + "version": "1.0.27-1", + "from": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.27-1.tgz", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.27-1.tgz", + "dependencies": { + "core-util-is": { + "version": "1.0.1", + "from": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz", + "resolved": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" + }, + "isarray": { + "version": "0.0.1", + "from": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "resolved": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" + }, + "string_decoder": { + "version": "0.10.31", + "from": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "resolved": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" + }, + "inherits": { + "version": "2.0.1", + "from": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "resolved": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + } + } + } + } + }, + "tail-stream": { + "version": "0.2.1", + "from": "http://registry.npmjs.org/tail-stream/-/tail-stream-0.2.1.tgz", + "resolved": "http://registry.npmjs.org/tail-stream/-/tail-stream-0.2.1.tgz" + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 000000000..14c4d5bd1 --- /dev/null +++ b/package.json @@ -0,0 +1,52 @@ +{ + "name": "installer", + "description": "Cloudron Installer", + "version": "0.0.1", + "private": "true", + "author": { + "name": "Cloudron authors" + }, + "repository": { + "type": "git" + }, + "engines": [ + "node >= 0.10.0" + ], + "dependencies": { + "async": "^0.9.0", + "body-parser": "^1.12.0", + "connect-lastmile": "0.0.10", + "debug": "^2.1.1", + "express": "^4.11.2", + "forever": "^0.14.1", + "json": "^9.0.3", + "morgan": "^1.5.1", + "proxy-middleware": "^0.11.0", + "safetydance": "0.0.16", + 
"superagent": "^0.21.0", + "tail-stream": "^0.2.1" + }, + "devDependencies": { + "aws-sdk": "^2.1.10", + "colors": "^1.0.3", + "commander": "^2.6.0", + "easy-table": "^0.3.0", + "expect.js": "^0.3.1", + "istanbul": "^0.3.5", + "lodash": "^3.2.0", + "mocha": "^2.1.0", + "nock": "^0.59.1", + "postmark": "^1.0.0", + "readline-sync": "^0.8.0", + "semver": "^4.3.0", + "ssh2": "^0.4.6", + "supererror": "^0.6.0", + "yesno": "0.0.1" + }, + "scripts": { + "test": "NODE_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test", + "precommit": "/bin/true", + "prepush": "npm test", + "postmerge": "/bin/true" + } +} diff --git a/release/CHANGES b/release/CHANGES new file mode 100644 index 000000000..f90ad0b9d --- /dev/null +++ b/release/CHANGES @@ -0,0 +1,85 @@ +[0.0.1] +- Hot Chocolate + +[0.0.2] +- Hotfix appstore ui in webadim + +[0.0.3] +- Tall Pike + +[0.0.4] +- This will be 0.0.4 changes + +[0.0.5] +- App install/configure route fixes + +[0.0.6] +- Not sure what happenned here + +[0.0.7] +- resetToken is now sent as part of create user +- Same as 0.0.7 which got released by mistake + +[0.0.8] +- Manifest changes + +[0.0.9] +- Fix app restore +- Fix backup issues + +[0.0.10] +- Unknown orchestra + +[0.0.11] +- Add ldap addon + +[0.0.12] +- Support OAuth2 state + +[0.0.13] +- Use docker image from cloudron repository + +[0.0.14] +- Improve setup flow + +[0.0.15] +- Improved Appstore view + +[0.0.16] +- Improved Backup approach + +[0.0.17] +- Upgrade testing +- App auto updates +- Usage graphs + +[0.0.18] +- Rework backups and updates + +[0.0.19] +- Graphite fixes +- Avatar and Cloudron name support + +[0.0.20] +- Apptask fixes +- Chrome related fixes + +[0.0.21] +- Increase nginx hostname size to 64 + +[0.0.22] +- Testing the e2e tests + +[0.0.23] +- Better error status page +- Fix updater and backup progress reporting +- New avatar set +- Improved setup wizard + +[0.0.24] +- Hotfix the ldap support + +[0.0.25] +- Add support 
page +- Really fix ldap issues + diff --git a/release/images b/release/images new file mode 100755 index 000000000..b03c9c344 --- /dev/null +++ b/release/images @@ -0,0 +1,216 @@ +#!/usr/bin/env node + +'use strict'; + +require('supererror')({ splatchError: true }); + +var superagent = require('superagent'), + async = require('async'), + yesno = require('yesno'), + p = require('commander'); + +var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; + +p.version('0.0.1') + .option('-l, --list', 'List images (default if neither --list or --cleanup provided') + .option('--cleanup', 'Delete images, which are not part of an release') + .option('-a, --all', 'Images from all environments (default if no argument provided)') + .option('-d, --development', 'Images from development') + .option('-s, --staging', 'Images from staging') + .option('-p, --production', 'Images from production') + .parse(process.argv); + +if (p.list || !p.cleanup) { + p.list = true; +} + +if (p.all || !(p.development || p.staging || p.production)) { + p.development = true; + p.staging = true; + p.production = true; +} + +function deleteImage(image, token, callback) { + var url = DIGITALOCEAN + '/images/' + image.id; + + console.log('Deleting image %s ...', image.name); + + superagent.del(url).set('Authorization', 'Bearer ' + token).end(function (error, result) { + if (error || result.error) return callback(error || result.error); + + callback(null); + }); +} + +function listImages(token, callback) { + var images = []; + var nextPage = null; + + async.doWhilst(function (callback) { + var url = DIGITALOCEAN + '/images?private=true'; + + superagent.get(url).set('Authorization', 'Bearer ' + token).end(function (error, result) { + if (error || result.error) return callback(error || result.error); + + nextPage = (result.body.links && result.body.links.pages && nextPage !== result.body.links.pages.next) ? 
result.body.links.pages.next : null; + images = images.concat(result.body.images); + + callback(null); + }); + }, function () { return !!nextPage; }, function (error) { + if (error) return callback(error); + callback(null, images); + }); +} + +function printEnvironment(tag, items, releases, callback) { + console.log(''); + console.log('%s:', tag); + console.log(''); + + var imageRegExp = new RegExp('box-(?:dev|staging|prod)-[0-9,a-f]{7}-[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}'); + + items.forEach(function (item) { + if (!imageRegExp.test(item.name)) return; + + var releaseNumber = []; + for (var release in releases) { + if (releases.hasOwnProperty(release)) { + if (releases[release].imageId === item.id) { + releaseNumber.push(release); + } + } + } + + console.log(' %s : %s %s\t[%s]', item.id, item.name, releaseNumber.length ? releaseNumber.join(', ') : ' ', item.regions); + }); + + console.log(''); + + callback(null); +} + +function cleanupEnvironment(env, tag, items, releases, callback) { + console.log(''); + console.log('Cleanup images on %s:', tag); + + var imagesToCleanup = []; + + var imageRegExp = new RegExp('box-(?:dev|staging|prod)-[0-9,a-f]{7}-[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}'); + + items.forEach(function (item) { + if (!imageRegExp.test(item.name)) return; + + for (var release in releases) { + if (releases.hasOwnProperty(release)) { + if (releases[release].imageId === item.id) { + return; + } + } + } + + // we reached here so no release found + imagesToCleanup.push(item); + }); + + if (imagesToCleanup.length === 0) { + console.log('All images belong to a release.'); + return callback(null); + } + + imagesToCleanup.forEach(function (item) { + console.log(' %s : %s [%s]', item.id, item.name, item.regions); + }); + + console.log(''); + + yesno.ask('Do you want to delete those images? 
[y/N]', false, function (ok) { + if (ok) { + async.each(imagesToCleanup, function (image, callback) { + deleteImage(image, process.env[env], callback); + }, callback); + return; + } + + callback(null); + }); +} + + +function handleListEnvironment(active, env, tag, releaseUrl) { + return function (callback) { + if (!active) return callback(null); + if (!process.env[env]) { + console.log('%s not set. Skipping %s.', env, tag); + return callback(null); + } + + listImages(process.env[env], function (error, result) { + if (error) return callback(error); + + var images = result; + superagent.get(releaseUrl).end(function (error, result) { + if (error || result.error) return callback(error || result.error); + + // we get it as text + var releases = JSON.parse(result.text); + + printEnvironment(tag, images, releases, callback); + }); + }); + }; +} + +function handleCleanupEnvironment(active, env, tag, releaseUrl) { + return function (callback) { + if (!active) return callback(null); + if (!process.env[env]) { + console.log('%s not set. 
Skipping %s.', env, tag); + return callback(null); + } + + listImages(process.env[env], function (error, result) { + if (error) return callback(error); + + var images = result; + superagent.get(releaseUrl).end(function (error, result) { + if (error || result.error) return callback(error || result.error); + + // we get it as text + var releases = JSON.parse(result.text); + + cleanupEnvironment(env, tag, images, releases, callback); + }); + }); + }; +} + +if (p.list) { + async.series([ + handleListEnvironment(p.development, 'DIGITAL_OCEAN_TOKEN_DEV', 'Development', 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json'), + handleListEnvironment(p.staging, 'DIGITAL_OCEAN_TOKEN_STAGING', 'Staging', 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json'), + handleListEnvironment(p.production, 'DIGITAL_OCEAN_TOKEN_PROD', 'Production', 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json') + ], function (error) { + if (error) { + console.log(error); + process.exit(1); + } + + process.exit(0); + }); +} + +if (p.cleanup) { + async.series([ + handleCleanupEnvironment(p.development, 'DIGITAL_OCEAN_TOKEN_DEV', 'Development', 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json'), + handleCleanupEnvironment(p.staging, 'DIGITAL_OCEAN_TOKEN_STAGING', 'Staging', 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json'), + handleCleanupEnvironment(p.production, 'DIGITAL_OCEAN_TOKEN_PROD', 'Production', 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json') + ], function (error) { + if (error) { + console.log(error); + process.exit(1); + } + + process.exit(0); + }); +} diff --git a/release/parsechangelog.js b/release/parsechangelog.js new file mode 100644 index 000000000..19d9c6509 --- /dev/null +++ b/release/parsechangelog.js @@ -0,0 +1,31 @@ +'use strict'; + +var fs = require('fs'); + +exports = module.exports = { + parse: parse +}; + +function parse(version) { + var changelog = [ ]; + var lines = fs.readFileSync(__dirname 
+ '/CHANGES', 'utf8').split('\n'); + for (var i = 0; i < lines.length; i++) { + if (lines[i] === '[' + version + ']') break; + } + + for (i = i + 1; i < lines.length; i++) { + if (lines[i] === '') continue; + if (lines[i][0] === '[') break; + + lines[i] = lines[i].trim(); + + // detect and remove list style - and * in changelog lines + if (lines[i].indexOf('-') === 0) lines[i] = lines[i].slice(1).trim(); + if (lines[i].indexOf('*') === 0) lines[i] = lines[i].slice(1).trim(); + + changelog.push(lines[i]); + } + + return changelog; +} + diff --git a/release/release b/release/release new file mode 100755 index 000000000..38ffcd9e2 --- /dev/null +++ b/release/release @@ -0,0 +1,591 @@ +#!/usr/bin/env node + +'use strict'; + +require('supererror')({ splatchError: true }); +require('colors'); + +var superagent = require('superagent'), + async = require('async'), + safe = require('safetydance'), + AWS = require('aws-sdk'), + yesno = require('yesno'), + Table = require('easy-table'), + program = require('commander'), + semver = require('semver'), + util = require('util'), + versionsFormat = require('./versionsformat.js'), + execSync = require('child_process').execSync, + parseChangelog = require('./parsechangelog.js').parse, + url = require('url'), + path = require('path'), + postmark = require('postmark')(process.env.POSTMARK_API_KEY_TOOLS), + assert = require('assert'); + +var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; + +var ENVIRONMENTS = { + 'dev': { + tag: 'dev', + url: 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json', + accessKeyId: process.env.AWS_DEV_ACCESS_KEY, + secretAccessKey: process.env.AWS_DEV_SECRET_KEY, + releasesBucket: 'dev-cloudron-releases', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_DEV + }, + 'staging': { + tag: 'staging', + url: 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json', + accessKeyId: process.env.AWS_STAGING_ACCESS_KEY, + secretAccessKey: process.env.AWS_STAGING_SECRET_KEY, + releasesBucket: 
'staging-cloudron-releases', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_STAGING + }, + 'prod': { + tag: 'prod', + url: 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json', + accessKeyId: process.env.AWS_PROD_ACCESS_KEY, + secretAccessKey: process.env.AWS_PROD_SECRET_KEY, + releasesBucket: 'prod-cloudron-releases', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_PROD + } +}; + +function exit(error) { + if (error) console.error(error.message ? error.message.red : error); + + process.exit(error ? 1 : 0); +} + +function notifyAdmins(env, releases, callback) { + console.log('Notifying admins about new release'.gray); + + var sortedVersions = Object.keys(releases).sort(semver.compare); + var oldVersion = sortedVersions[sortedVersions.length - 2], + newVersion = sortedVersions[sortedVersions.length - 1]; + + var oldImageRef = releases[oldVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2], + newImageRef = releases[newVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2]; + + var imageLogs = execSync(util.format('git log %s..%s --format=oneline', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'), + imageStat = execSync(util.format('git diff --stat %s..%s', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'); + + var oldBoxRef = url.parse(releases[oldVersion].sourceTarballUrl).path.match('/box-(.*).tar.gz')[1], + newBoxRef = url.parse(releases[newVersion].sourceTarballUrl).path.match('/box-(.*).tar.gz')[1]; + + var boxRepo = path.resolve(__dirname, '../../box'); + + var boxLogs = execSync(util.format('git log %s..%s --format=oneline', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'), + boxStat = execSync(util.format('git diff --stat %s..%s', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'); + + var textBody = util.format( + 'A new box release was pushed by %s.\n\n' + + 'Image Changes\n' + + '-----------------\n' + + '%s\n\n%s\n\n' + + 'Box Changes\n' + + '-----------\n' 
+ + '%s\n\n%s\n\n' + + 'Changelog\n' + + '---------\n' + + '%s\n\n' + + 'Release json\n' + + '------------\n' + + '%s\n\n' + + 'Regards,\n' + + 'Release team\n', + releases[newVersion].author, imageLogs, imageStat, boxLogs, boxStat, + releases[newVersion].changelog, JSON.stringify(releases[newVersion], null, 4)); + + postmark.send({ + 'From': 'no-reply@cloudron.io', + 'To': 'admin@cloudron.io', + 'Subject': util.format('[%s] New box release %s', env.tag, newVersion), + 'TextBody': textBody, + 'Tag': 'Important' + }, callback); +} + +function verifyAndUpload(env, releases, callback) { + assert.strictEqual(typeof env, 'object'); + assert.strictEqual(typeof releases, 'object'); + assert.strictEqual(typeof callback, 'function'); + + var s3 = new AWS.S3({ + accessKeyId: env.accessKeyId, + secretAccessKey: env.secretAccessKey + }); + + var error = versionsFormat.verify(releases); + if (error) return callback(error); + + s3.putObject({ + Bucket: env.releasesBucket, + Key: 'versions.json', + ACL: 'public-read', + Body: JSON.stringify(releases, null, 4), + ContentType: 'application/json' + }, function (error, data) { + if (error) return callback(error); + + console.log('Uploaded'.green); + + callback(null); + }); +} + +function newRelease(options) { + var env = ENVIRONMENTS[options.env]; + if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); + + if (!options.file) exit(new Error('--file is required')); + + var contents = safe.fs.readFileSync(options.file, 'utf8'); + if (!contents) exit(safe.error); + + var releases = safe.JSON.parse(contents); + if (!releases) exit(new Error(options.file + ' has invalid json :' + safe.error.message)); + + verifyAndUpload(env, releases, exit); +} + +function createRelease(options) { + var env = ENVIRONMENTS[options.env]; + if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); + + if (env.tag === 'prod') { + if (options.revert || options.rerelease || options.revert) return exit(new 
Error('operation is not allowed in prod')); + } + + if (!options.rerelease && !options.revert) { + if (!options.code && !options.image) exit(new Error('--code or --image is required')); + } + + if (options.image && !parseInt(options.image, 10)) exit('image must be a number'); + if (options.code && !safe.url.parse(options.code)) exit('code must be a valid url'); + + var username = execSync('git config user.name').toString('utf8').trim(); + var email = execSync('git config user.email').toString('utf8').trim(); + + superagent.get(env.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var releases = result.type === 'application/json' ? result.body : safe.JSON.parse(result.text); + + if (!releases) exit(new Error('versions.json is not valid JSON')); + + var sortedVersions = Object.keys(releases).sort(semver.rcompare); + var lastVersion = sortedVersions[0]; + + if (options.revert) { + var secondLastVersion = sortedVersions[1]; + + releases[secondLastVersion].next = null; + delete releases[lastVersion]; + + console.log('Reverting %s'.gray, lastVersion); + return verifyAndUpload(env, releases, exit); + } + + var newVersion = options.amend ? lastVersion : semver.inc(lastVersion, 'patch'); + releases[lastVersion].next = newVersion; + + var newImageId = options.image ? 
parseInt(options.image, 10) : releases[lastVersion].imageId; + var sourceTarballUrl = options.code || releases[lastVersion].sourceTarballUrl; + var upgrade = options.upgrade || (releases[lastVersion].imageId !== newImageId); + + // check if we have a changelog otherwise + var changelog = parseChangelog(newVersion); + if (changelog.length === 0) console.log('No changelog for version %s found.'.yellow, newVersion.bold); + + var url = DIGITALOCEAN + '/images/' + newImageId; + superagent.get(url).set('Authorization', 'Bearer ' + env.digitalOceanToken).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + releases[newVersion] = { + sourceTarballUrl: sourceTarballUrl, + imageId: newImageId, + imageName: result.body.image.name, + changelog: changelog, + upgrade: upgrade, + date: (new Date()).toString(), + author: username + ' <' + email + '>', + next: null + }; + + verifyAndUpload(env, releases, function (error) { + if (error) return exit(error); + + console.log('%s : %s', newVersion, JSON.stringify(releases[newVersion], null, 4)); + + exit(); + }); + }); + }); +} + +function listRelease(options) { + var env = ENVIRONMENTS[options.env]; + if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); + + var raw = !!options.raw; + + superagent.get(env.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + if (raw) { + console.log(JSON.stringify(result.body, null, 4)); + exit(null); + } + + console.log(''); + console.log('%s:'.gray, env.tag); + console.log(''); + + if (result.type !== 'application/json') { + console.log('The content type of the release file is %s. 
It should be application/json something might have gone wrong!'.red, result.type); + console.log('Trying to parse it anyway...'); + console.log(''); + result.body = safe.JSON.parse(result.text); + if (!result.body) { + console.log('Release file is not valid JSON!'.red); + exit(); + } + } + + if (Object.keys(result.body).length === 0) { + console.log('No releases'); + exit(null); + } + + var t = new Table(); + + for (var release in result.body) { + t.cell('Release', release); + t.cell('Image ID', result.body[release].imageId + (result.body[release].upgrade ? '*' : '')); + t.cell('Image Name', result.body[release].imageName); + t.cell('Date', result.body[release].date); + t.cell('Author', result.body[release].author); + t.cell('Next', result.body[release].next); + t.cell('Source', result.body[release].sourceTarballUrl.slice(result.body[release].sourceTarballUrl.lastIndexOf('/') + 1)); + t.newRow(); + } + + console.log(t.toString()); + + exit(null); + }); +} + +function touchRelease(options, callback) { + var env = ENVIRONMENTS[options.env]; + if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); + + superagent.get(env.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var latestVersion = Object.keys(result.body).sort(semver.rcompare)[0]; + result.body[latestVersion].date = (new Date()).toString(); + + verifyAndUpload(env, result.body, exit); + }); +} + +function listImages(token, callback) { + var images = []; + var nextPage = DIGITALOCEAN + '/images?private=true'; + + async.doWhilst(function (callback) { + superagent.get(nextPage).set('Authorization', 'Bearer ' + token).end(function (error, result) { + if (error || result.error) return callback(error || result.error); + + nextPage = (result.body.links && result.body.links.pages && nextPage !== result.body.links.pages.next) ? 
result.body.links.pages.next : null; + images = images.concat(result.body.images); + + callback(null); + }); + }, function () { return !!nextPage; }, function (error) { + if (error) return callback(error); + callback(null, images); + }); +} + +function sync(options) { + var destEnv = ENVIRONMENTS[options.env]; + if (!destEnv) exit(new Error(util.format('Unknown environment %s', options.env))); + + var sourceEnv; + + if (destEnv.tag === 'staging') sourceEnv = ENVIRONMENTS['prod']; + else if (destEnv.tag === 'dev') sourceEnv = ENVIRONMENTS['staging']; + else exit('Unable to determine source environment to sync from'); + + console.log('Syncing %s to %s', sourceEnv.tag.cyan.bold, destEnv.tag.cyan.bold); + + var S3 = new AWS.S3({ + accessKeyId: destEnv.accessKeyId, + secretAccessKey: destEnv.secretAccessKey + }); + + superagent.get(sourceEnv.url).end(function (error, result) { + if (error || result.error) exit(error || result.error); + + var sourceReleases = result.body; + var destReleases = {}; + + var params = { + Bucket: destEnv.releasesBucket, + Prefix: 'box-' + }; + + S3.listObjects(params, function(error, data) { + if (error) exit(error); + + var devSourceTarballs = data.Contents; + + listImages(destEnv.digitalOceanToken, function (error, images) { + if (error) exit(error); + + for (var release in sourceReleases) { + var match = sourceReleases[release].imageName.match(/box-(?:prod|staging|dev)-(.*)-\d\d\d\d-\d\d-\d\d/); + if (!match || !match[1]) exit('Unable to parse image name %s of release %s.', sourceReleases[release].imageName, release); + + var sourceImageRevision = match[1]; + + // find a suitable image and sourceTarballUrl on dev + var suitableImage = null; + var suitableSourceTarball = null; + + images.forEach(function (image) { + if (image.name.indexOf(util.format('box-%s-%s', destEnv.tag, sourceImageRevision)) === 0) { + suitableImage = image; + } + }); + + devSourceTarballs.forEach(function (tarball) { + if 
(sourceReleases[release].sourceTarballUrl.indexOf(tarball.Key) !== -1) { + suitableSourceTarball = 'https://' + destEnv.releasesBucket + '.s3.amazonaws.com/' + tarball.Key; + } + }); + + if (!suitableImage) { + console.log('Unable to find a suitable image on %s for release %s.', destEnv.tag, release); + console.log('Required image revision is %s', sourceImageRevision); + process.exit(1); + } + + if (!suitableSourceTarball) { + console.log('Unable to find a suitable source tarball on %s for release %s.', destEnv.tag, release); + console.log('Required source tarball is %s', sourceReleases[release].sourceTarballUrl.slice(sourceReleases[release].sourceTarballUrl.lastIndexOf('/') + 1)); + process.exit(1); + } + + destReleases[release] = { + sourceTarballUrl: suitableSourceTarball, + imageId: suitableImage.id, + imageName: suitableImage.name, + changelog: sourceReleases[release].changelog, + upgrade: sourceReleases[release].upgrade, + date: sourceReleases[release].date, + author: sourceReleases[release].author, + next: sourceReleases[release].next + }; + } + + console.log('Potential %s release file:', destEnv.tag); + console.log(''); + console.log(destReleases); + console.log(''); + + yesno.ask('Do you want to upload that release file? 
[y/N]', false, function (ok) { + if (!ok) process.exit(1); + + var params = { + Bucket: destEnv.releasesBucket, + Key: 'versions.json', + ACL: 'public-read', + Body: JSON.stringify(destReleases, null, 4), + ContentType: 'application/json' + }; + + S3.putObject(params, function(error, data) { + if (error) { + console.error(error); + process.exit(1); + } + + console.log('Upload successful.'); + process.exit(0); + }); + }); + }); + }); + }); +} + +function getImageByRevision(env, revision, callback) { + assert.strictEqual(typeof revision, 'string'); + assert.strictEqual(typeof callback, 'function'); + + var url = DIGITALOCEAN + '/images?per_page=100'; + superagent.get(url).set('Authorization', 'Bearer ' + env.digitalOceanToken).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var images = result.body.images; + for (var i = 0; i < images.length; i++) { + if (images[i].name.indexOf('box-' + env.tag + '-' + revision) === 0) return callback(null, images[i]); + } + + callback(new Error('No image for ' + revision)); + }); +} + +function stage(fromEnv, toEnv) { + var username = execSync('git config user.name').toString('utf8').trim(); + var email = execSync('git config user.email').toString('utf8').trim(); + + console.log('Staging from %s -> %s'.gray, fromEnv.tag, toEnv.tag); + + superagent.get(fromEnv.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var fromReleases = result.type === 'application/json' ? result.body : safe.JSON.parse(result.text); + if (!fromReleases) exit(new Error('versions.json is not valid JSON')); + + superagent.get(toEnv.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var toReleases = result.type === 'application/json' ? 
result.body : safe.JSON.parse(result.text); + if (!toReleases) exit(new Error('versions.json is not valid JSON')); + + var latestFromVersion = Object.keys(fromReleases).sort(semver.rcompare)[0]; + var latestToVersion = Object.keys(toReleases).sort(semver.rcompare)[0]; + var nextVersion = semver.inc(latestToVersion, 'patch'); + + console.log('Releasing version %s to %s'.gray, nextVersion, toEnv.tag); + + // check if we even have a new version to stage + if (latestFromVersion === latestToVersion) exit(util.format('No new version on %s to stage.', fromEnv.tag)); + + // check if we have a changelog + var changelog = parseChangelog(nextVersion); + if (changelog.length === 0) exit(new Error('No changelog found for version ' + nextVersion)); + + var latestFromImageName = fromReleases[latestFromVersion].imageName; + var latestFromImageRevision = new RegExp('box-' + fromEnv.tag + '-([a-z,0-9.]+)-.*').exec(latestFromImageName)[1]; + + if (!latestFromImageRevision) exit('Unable to determine image revision'); + + getImageByRevision(toEnv, latestFromImageRevision, function (error, toImage) { + if (error) return exit(error); + + var sourceTarballName = url.parse(fromReleases[latestFromVersion].sourceTarballUrl).pathname.substr(1); + var upgrade = toReleases[latestToVersion].imageId !== toImage.id; + + console.log('Copying source code tarball %s to %s'.gray, sourceTarballName, toEnv.tag); + + var cmd = util.format( + 's3cmd get -v --ssl --access_key="%s" --secret_key="%s" "s3://%s/%s" - ' + + ' | s3cmd put -v --ssl --add-header=x-amz-acl:authenticated-read --access_key="%s" --secret_key="%s" - "s3://%s/%s"', + fromEnv.accessKeyId, fromEnv.secretAccessKey, fromEnv.releasesBucket, sourceTarballName, + toEnv.accessKeyId, toEnv.secretAccessKey, toEnv.releasesBucket, sourceTarballName + ); + + execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); + + toReleases[latestToVersion].next = nextVersion; + toReleases[nextVersion] = { + imageId: toImage.id, + imageName: 
toImage.name, + changelog: changelog, + upgrade: upgrade, + date: (new Date()).toString(), + sourceTarballUrl: 'https://' + toEnv.releasesBucket + '.s3.amazonaws.com/' + sourceTarballName, + author: username + ' <' + email + '>', + next: null + }; + + verifyAndUpload(toEnv, toReleases, function (error) { + if (error) return exit(error); + + console.log('%s : %s', nextVersion, JSON.stringify(toReleases[nextVersion], null, 4)); + + notifyAdmins(toEnv, toReleases, exit); + }); + }); + }); + }); +} + +program.version('0.0.1'); + +program.command('create') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .option('--code ', 'Source code url') + .option('--image ', 'Image id') + .option('--changelog ', 'Changelog') + .option('--upgrade', 'Set the upgrade flag') + .description('Create a new release') + .action(createRelease); + +program.command('revert') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Revert the last release. Use with care') + .action(function (options) { options.revert = true; createRelease(options); }); + +program.command('new') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .option('--file ', 'Upload file as versions.json') + .description('Upload a new versions.json') + .action(newRelease); + +program.command('amend') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .option('--code ', 'Source code url') + .option('--image ', 'Image id') + .option('--changelog ', 'Changelog') + .option('--upgrade', 'Set the upgrade flag') + .description('Amend last release. 
Use with care') + .action(function (options) { options.amend = true; createRelease(options); }); + +program.command('rerelease') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Make a new release, same as the last release') + .action(function (options) { options.rerelease = true; createRelease(options); }); + +program.command('list') + .option('--raw', 'Show raw json') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('List the releases file') + .action(listRelease); + +program.command('sync') + .option('--env ', 'Environment (dev/staging)', 'dev') + .description('Sync the specified env with the parent env (prod -> staging or staging -> dev)') + .action(sync); + +program.command('stage') + .description('Stage latest dev version to staging') + .action(stage.bind(null, ENVIRONMENTS['dev'], ENVIRONMENTS['staging'])); + +program.command('touch') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Touch the releases file') + .action(touchRelease); + +program.command('publish') + .description('Publish latest staging version to production') + .action(stage.bind(null, ENVIRONMENTS['staging'], ENVIRONMENTS['prod'])); + +program.parse(process.argv); + +if (!process.argv.slice(2).length) { + program.outputHelp(); +} else { // https://github.com/tj/commander.js/issues/338 + var knownCommand = program.commands.some(function (command) { return command._name === process.argv[2]; }); + if (!knownCommand) { + console.error('Unknown command: ' + process.argv[2]); + process.exit(1); + } +} + diff --git a/release/versions.json b/release/versions.json new file mode 100644 index 000000000..b12a84030 --- /dev/null +++ b/release/versions.json @@ -0,0 +1,418 @@ +{ + "0.0.1": { + "sourceTarballUrl": "https://s3.amazonaws.com/cloudron-releases/box-5b369d2b78605140be63c8c2dc3e4af1ea6ae17b.tar.gz", + "imageId": 10504128, + "imageName": "box-e5d4524-2015-02-06-172850", + "changelog": [ + "Hot Chocolate" + ], + 
"upgrade": false, + "date": "Fri Feb 6 17:25:45 UTC 2015", + "next": "0.0.2" + }, + "0.0.2": { + "sourceTarballUrl": "https://s3.amazonaws.com/cloudron-releases/box-f2b6340c32c29e5e265abcd7044433d68ac0024c.tar.gz", + "imageId": 10504128, + "imageName": "box-e5d4524-2015-02-06-172850", + "changelog": [ + "Hotfix appstore ui in webadim" + ], + "upgrade": false, + "date": "Fri Feb 6 19:13:26 UTC 2015", + "next": "0.0.3" + }, + "0.0.3": { + "sourceTarballUrl": "https://s3.amazonaws.com/staging-cloudron-releases/box-20e43bdf9c6cf40d3412c59750bc43e834ec39d3.tar.gz", + "imageId": 10621904, + "imageName": "box-8c16ea0-2015-02-12-154005", + "changelog": [ + "Tall Pike" + ], + "upgrade": true, + "date": "Fri Feb 13 00:40:22 UTC 2015", + "next": "0.0.4" + }, + "0.0.4": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-20e43bdf9c6cf40d3412c59750bc43e834ec39d3.tar.gz", + "imageId": 10624164, + "imageName": "box-0ec7efa-2015-02-12-181028", + "changelog": [ + "Ghost release" + ], + "upgrade": false, + "date": "Tue Feb 17 18:03:31 UTC 2015", + "next": "0.0.5" + }, + "0.0.5": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-94383e98591934b648713eccfa67a3f7bbaf659b.tar.gz", + "imageId": 10694830, + "imageName": "box-24cfd4d-2015-02-18-140547", + "changelog": [ + "Banana Smoothie" + ], + "upgrade": true, + "date": "Thu Feb 19 00:13:35 UTC 2015", + "next": "0.0.6" + }, + "0.0.6": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-00dbdddce752d454a6a37b3b15eff9f24b0d8882.tar.gz", + "imageId": 10787693, + "imageName": "box-d7e153f-2015-02-25-192418", + "changelog": [ + "Chai Latte" + ], + "upgrade": true, + "date": "Thu Feb 26 04:19:48 UTC 2015", + "next": "0.0.7" + }, + "0.0.7": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-00dbdddce752d454a6a37b3b15eff9f24b0d8882.tar.gz", + "imageId": 10787693, + "imageName": "box-d7e153f-2015-02-25-192418", + "changelog": [ + 
"Rerelease for updating SSL certificates" + ], + "upgrade": true, + "date": "Fri Feb 27 07:49:36 UTC 2015", + "next": "0.0.8" + }, + "0.0.8": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0b5b4535de027a0abccd823b75b4937ec4926d6c.tar.gz", + "imageId": 10881993, + "imageName": "box-3ad90f0-2015-03-04-155817", + "changelog": [ + "Orange Pekoe", + "It's all coming together!" + ], + "upgrade": true, + "date": "Thu Mar 5 00:23:34 UTC 2015", + "next": "0.0.9" + }, + "0.0.9": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-bbe982f14f861b39636ab37072d4e3b3c44a55ac.tar.gz", + "imageId": 10881993, + "imageName": "box-3ad90f0-2015-03-04-155817", + "changelog": [ + "Kashayam" + ], + "upgrade": false, + "date": "Mon Mar 9 23:21:42 UTC 2015", + "next": "0.0.10" + }, + "0.0.10": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-5aac5bd56fe8b917b198bdf7ec4b4bbe231e292c.tar.gz", + "imageId": 10881993, + "imageName": "box-3ad90f0-2015-03-04-155817", + "changelog": [ + "Hot fix for GitLab" + ], + "upgrade": false, + "date": "Tue Mar 10 02:40:53 UTC 2015", + "next": "0.0.11" + }, + "0.0.11": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-b50692fc670ef6ab1bd35d14076cf20fa48cf002.tar.gz", + "imageId": 10881993, + "imageName": "box-3ad90f0-2015-03-04-155817", + "changelog": [ + "Fix app updates" + ], + "upgrade": false, + "date": "Tue Mar 10 18:44:55 UTC 2015", + "next": "0.0.12" + }, + "0.0.12": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-062f94b335e8b57caf9ec4780402f023297fc1b7.tar.gz", + "imageId": 11055383, + "imageName": "box-4e04584-2015-03-17-161439", + "changelog": [ + "Port binding fixes" + ], + "upgrade": true, + "date": "Tue Mar 17 23:35:01 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.13" + }, + "0.0.13": { + "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-ab0f9f691192c735825bb8aa04c1d246b22b067b.tar.gz", + "imageId": 11055383, + "imageName": "box-4e04584-2015-03-17-161439", + "changelog": [ + "Implement App ids" + ], + "upgrade": false, + "date": "Mon Mar 23 02:50:53 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.14" + }, + "0.0.14": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-4cabe44e998be028a86293827c58685f66ae2412.tar.gz", + "imageId": 11055383, + "imageName": "box-4e04584-2015-03-17-161439", + "changelog": [ + "Fix App updates" + ], + "upgrade": false, + "date": "Mon Mar 23 04:50:42 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.15" + }, + "0.0.15": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d19af1ff6aa47cf2053b9e279078e0421579be57.tar.gz", + "imageId": 11055383, + "imageName": "box-4e04584-2015-03-17-161439", + "changelog": [ + "Fix manifest format" + ], + "upgrade": false, + "date": "Mon Mar 23 05:53:05 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.16" + }, + "0.0.16": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d19af1ff6aa47cf2053b9e279078e0421579be57.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Image upgrade with newer addons" + ], + "upgrade": false, + "date": "Wed Mar 25 19:33:45 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.17" + }, + "0.0.17": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-5c1fb62adb35fc311565eb6495dc2985cfc6dc3d.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Subdomain API changes" + ], + "upgrade": false, + "date": "Wed Mar 25 19:41:28 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.18" + }, + "0.0.18": { + "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-d609c0b3052422813a03a68403c4def47f2ffcba.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Reorg app data" + ], + "upgrade": false, + "date": "Wed Mar 25 20:41:25 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.19" + }, + "0.0.19": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-9b38e9a6f2e2abad725fedd4f78323619661ed55.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Delegate app dir creation" + ], + "upgrade": false, + "date": "Thu Mar 26 03:10:53 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.20" + }, + "0.0.20": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-b1381263d3e243eb986d4d9cbf7d2853171941bf.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Change backup strategy" + ], + "upgrade": false, + "date": "Wed Apr 1 08:18:50 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.21" + }, + "0.0.21": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0db08cf3efc8345f90d41203cc18f747f42499fa.tar.gz", + "imageId": 11162711, + "imageName": "box-e34c6ce-2015-03-25-121127", + "changelog": [ + "Remove hacks for backup migration" + ], + "upgrade": false, + "date": "Wed Apr 1 08:38:52 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.22" + }, + "0.0.22": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-9cbaef5b41df6a9f5c597448427fe706d2fb0221.tar.gz", + "imageId": 11282641, + "imageName": "box-28a9001-2015-04-02-193801", + "changelog": [ + "Backup fixes" + ], + "upgrade": true, + "date": "Fri Apr 3 02:58:35 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.23" + }, + "0.0.23": { + "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-038ae04f9edbe5727931eace1a62b083c6cebd79.tar.gz", + "imageId": 11384175, + "imageName": "box-1c48a4b-2015-04-09-231714", + "changelog": [ + "Polish, polish, polish" + ], + "upgrade": true, + "date": "Fri Apr 10 06:46:56 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.24" + }, + "0.0.24": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-a168846a056e03f5804cc7b0de8bf9438aa0a4a5.tar.gz", + "imageId": 11390303, + "imageName": "box-94f1086-2015-04-10-174805", + "changelog": [ + "Upgrade all apps and containers" + ], + "upgrade": true, + "date": "Sat Apr 11 01:10:31 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.25" + }, + "0.0.25": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0a82fcb15f060aa53c1ec0f767d3e56f707416aa.tar.gz", + "imageId": 11390303, + "imageName": "box-94f1086-2015-04-10-174805", + "changelog": [ + "Fix backup creation issue caused by execFile buffer overflow" + ], + "upgrade": false, + "date": "Mon Apr 13 05:00:04 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.26" + }, + "0.0.26": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d8ac0330e8c9821c5094d7ba985cbece963489c1.tar.gz", + "imageId": 11390303, + "imageName": "box-94f1086-2015-04-10-174805", + "changelog": [ + "Update manifestformat" + ], + "upgrade": false, + "date": "Mon Apr 13 19:54:17 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.27" + }, + "0.0.27": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-c9b7d9a0181911fce6cbc79f5cc965affa2ab2ae.tar.gz", + "imageId": 11390303, + "imageName": "box-94f1086-2015-04-10-174805", + "changelog": [ + "_docker", + "Rickshaw inclusion" + ], + "upgrade": false, + "date": "Fri Apr 17 16:37:25 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.28" + }, + "0.0.28": { + "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-c9b7d9a0181911fce6cbc79f5cc965affa2ab2ae.tar.gz", + "imageId": 11463691, + "imageName": "box-f620aed-2015-04-17-090956", + "changelog": [ + "docker image cleanup" + ], + "upgrade": true, + "date": "Fri Apr 17 16:40:53 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.29" + }, + "0.0.29": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-b22136f11b0b7f51f718aad18904725a6f5c95db.tar.gz", + "imageId": 11463691, + "imageName": "box-f620aed-2015-04-17-090956", + "changelog": [ + "Fix app states" + ], + "upgrade": false, + "date": "Tue Apr 21 17:27:00 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.30" + }, + "0.0.30": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-73de1a7c4e422e4f320b8e8c336e06b48eee1241.tar.gz", + "imageId": 11535902, + "imageName": "box-abaa2f6-2015-04-22-115250", + "changelog": [ + "Fix nginx and collectd configuration setup", + "Foundation for updating box without app restart" + ], + "upgrade": true, + "date": "Wed Apr 22 21:44:00 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.31" + }, + "0.0.31": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-a36d6d864f8905706b94bf7f0c3e50ae0ae857f4.tar.gz", + "imageId": 11535902, + "imageName": "box-abaa2f6-2015-04-22-115250", + "changelog": [ + "Do not restart apps for box update" + ], + "upgrade": false, + "date": "Thu Apr 23 02:51:43 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.32" + }, + "0.0.32": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-69f9bee6777db74416530aba09541243d215194e.tar.gz", + "imageId": 11535902, + "imageName": "box-abaa2f6-2015-04-22-115250", + "changelog": [ + "Fix nginx templating", + "Implement infrastructure versioning" + ], + "upgrade": false, + "date": "Fri Apr 24 18:21:42 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.33" + }, + 
"0.0.33": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-11aecac6fde56f2289d711e4dee60baa1ccf4d00.tar.gz", + "imageId": 11567620, + "imageName": "box-e877472-2015-04-24-160626", + "changelog": [ + "Fix bug in retire", + "Fix non-infra upgrade" + ], + "upgrade": true, + "date": "Sat Apr 25 01:43:44 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.34" + }, + "0.0.34": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-88bbe86000066eb264c2fa00a55d3b9ddd3e50dd.tar.gz", + "imageId": 11567620, + "imageName": "box-e877472-2015-04-24-160626", + "changelog": [ + "Fix addons ownership", + "Fix backup for upgrades" + ], + "upgrade": false, + "date": "Sat Apr 25 03:34:48 UTC 2015", + "author": "Girish Ramakrishnan ", + "next": "0.0.35" + }, + "0.0.35": { + "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-7b7df404b7e7463853d440ba07becceac49c1888.tar.gz", + "imageId": 11876727, + "imageName": "box-cc1f57c-2015-05-15-200507", + "changelog": [ + "Change restart policy of containers to always", + "WARNING! 
#!/usr/bin/env node

'use strict';

var fs = require('fs'),
    safe = require('safetydance'),
    semver = require('semver'),
    util = require('util'),
    url = require('url');

exports = module.exports = {
    verifyFile: verifyFile,
    verify: verify
};

// Validates the structure of a parsed versions.json object.
// Returns null when the object is valid, otherwise an Error describing the
// first problem found (this module reports errors by return value, not throw).
function verify(versionsJson) {
    if (!versionsJson || typeof versionsJson !== 'object') return new Error('versions must be valid object');

    // check all the keys
    var sortedVersions = Object.keys(versionsJson).sort(semver.compare);
    for (var i = 0; i < sortedVersions.length; i++) {
        var version = sortedVersions[i];
        var release = versionsJson[version];

        if (typeof release.imageId !== 'number') return new Error('version ' + version + ' does not have proper imageId');

        if (typeof release.imageName !== 'string' || !release.imageName.length) return new Error('version ' + version + ' does not have proper imageName');

        // the release files use the lowercase 'changelog' key (see versions.json);
        // the old camelCase 'changeLog' spelling is still accepted for backward compatibility
        if ('changelog' in release && !util.isArray(release.changelog)) return new Error('version ' + version + ' does not have proper changelog');
        if ('changeLog' in release && !util.isArray(release.changeLog)) return new Error('version ' + version + ' does not have proper changeLog');

        if (typeof release.date !== 'string' || ((new Date(release.date)).toString() === 'Invalid Date')) return new Error('invalid date or missing date');

        if (release.next !== null && typeof release.next !== 'string') return new Error('version ' + version + ' does not have proper next');

        if (typeof release.sourceTarballUrl !== 'string') return new Error('version ' + version + ' does not have proper sourceTarballUrl');

        if ('author' in release && typeof release.author !== 'string') return new Error('author must be a string');

        var tarballUrl = url.parse(release.sourceTarballUrl);
        if (tarballUrl.protocol !== 'https:') return new Error('sourceTarballUrl must be https');
        if (!/\.tar\.gz$/.test(tarballUrl.path)) return new Error('sourceTarballUrl must be tar.gz'); // dots escaped so e.g. 'xtarxgz' does not pass

        var nextVersion = release.next;
        // despite having the 'next' field, the appstore code currently relies on all versions being sorted based on semver.compare (see boxversions.js)
        if (nextVersion && semver.gt(version, nextVersion)) return new Error('next version cannot be less than current @' + version);
    }

    // check that package.json version is in versions.json
    var currentVersion = require('../package.json').version;
    if (sortedVersions.indexOf(currentVersion) === -1) {
        return new Error('package.json version is not present in versions.json');
    }

    return null;
}

// Reads versionsFileName from disk, parses it and validates it with verify().
// Returns null when valid, an Error otherwise.
function verifyFile(versionsFileName) {
    // check if the json is valid
    var versions = safe.JSON.parse(fs.readFileSync(versionsFileName));
    if (!versions) {
        return new Error(versionsFileName + ' is not valid json : ' + safe.error);
    }

    return verify(versions);
}
/* jslint node: true */

'use strict';

var assert = require('assert'),
    child_process = require('child_process'),
    debug = require('debug')('installer:installer'),
    path = require('path'),
    util = require('util');

exports = module.exports = {
    InstallerError: InstallerError,

    provision: provision,
    restore: restore,
    update: update,
    retire: retire
};

var INSTALLER_CMD = path.join(__dirname, 'scripts/installer.sh'),
    RETIRE_CMD = path.join(__dirname, 'scripts/retire.sh'),
    SUDO = '/usr/bin/sudo';

// Error type for installer failures. 'reason' is one of the InstallerError.*
// codes below; 'info' may be a string or an object (stringified into message).
function InstallerError(reason, info) {
    Error.call(this);
    Error.captureStackTrace(this, this.constructor);

    this.name = this.constructor.name;
    this.reason = reason;
    this.message = !info ? reason : (typeof info === 'object' ? JSON.stringify(info) : info);
}
util.inherits(InstallerError, Error);
InstallerError.INTERNAL_ERROR = 1;
InstallerError.ALREADY_PROVISIONED = 2;

// update and restore are currently aliases of provision; the installer.sh
// script decides between fresh install and update based on on-disk state.
function update(args, callback) {
    provision(args, callback);
}

function restore(args, callback) {
    provision(args, callback);
}

// Runs 'cmd' with 'args', streaming its output to the debug log under 'tag'.
// The callback is guaranteed to be invoked exactly once: 'error' and 'exit'
// can both fire on the same child process (e.g. spawn ENOENT), so we guard.
function spawn(tag, cmd, args, callback) {
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof cmd, 'string');
    assert(util.isArray(args));
    assert.strictEqual(typeof callback, 'function');

    var done = false;
    function once(error) {
        if (done) return; // report only the first outcome
        done = true;
        callback(error);
    }

    var cp = child_process.spawn(cmd, args); // note: child_process.spawn has no 'timeout' option
    cp.stdout.setEncoding('utf8');
    cp.stdout.on('data', function (data) { debug('%s (stdout): %s', tag, data); });
    cp.stderr.setEncoding('utf8');
    cp.stderr.on('data', function (data) { debug('%s (stderr): %s', tag, data); });

    cp.on('error', function (error) {
        debug('%s : child process errored %s', tag, error.message);
        once(error);
    });

    cp.on('exit', function (code, signal) {
        // code can be null when killed by a signal; signal is a string, so use %s
        debug('%s : child process exited. code: %s signal: %s', tag, code, signal);
        if (signal) return once(new Error('Exited with signal ' + signal));
        if (code !== 0) return once(new Error('Exited with code ' + code));

        once(null);
    });
}

// Retires this cloudron (shows splash screen, stops the box code).
// args.data is opaque JSON that is handed to the retire script.
function retire(args, callback) {
    assert.strictEqual(typeof args, 'object');
    assert.strictEqual(typeof callback, 'function');

    var pargs = [ RETIRE_CMD ];
    pargs.push('--data', JSON.stringify(args.data));

    debug('retire: calling with args %j', pargs);

    if (process.env.NODE_ENV === 'test') return callback(null);

    // sudo is required for retire()
    spawn('retire', SUDO, pargs, callback);
}

// Downloads args.sourceTarballUrl and runs the box setup via installer.sh.
// args.data is opaque JSON passed through to the setup scripts.
function provision(args, callback) {
    assert.strictEqual(typeof args, 'object');
    assert.strictEqual(typeof callback, 'function');

    var pargs = [ INSTALLER_CMD ];
    pargs.push('--sourcetarballurl', args.sourceTarballUrl);
    pargs.push('--data', JSON.stringify(args.data));

    debug('provision: calling with args %j', pargs);

    if (process.env.NODE_ENV === 'test') return callback(null);

    // sudo is required for update()
    spawn('provision', SUDO, pargs, callback);
}
#!/usr/bin/env node

/* jslint node: true */

'use strict';

var announce = require('./announce.js'),
    assert = require('assert'),
    async = require('async'),
    debug = require('debug')('installer:server'),
    express = require('express'),
    fs = require('fs'),
    http = require('http'),
    HttpError = require('connect-lastmile').HttpError,
    https = require('https'),
    HttpSuccess = require('connect-lastmile').HttpSuccess,
    installer = require('./installer.js'),
    json = require('body-parser').json,
    lastMile = require('connect-lastmile'),
    morgan = require('morgan'),
    path = require('path'),
    safe = require('safetydance'),
    superagent = require('superagent'),
    ts = require('tail-stream');

exports = module.exports = {
    start: start,
    stop: stop
};

var gHttpsServer = null, // provision server; used for install/restore
    gHttpServer = null; // update server; used for updates

// 'data' is opaque. the following code exists to help debugging
function checkData(data) {
    assert.strictEqual(typeof data, 'object');

    if (typeof data.token !== 'string') console.error('No token provided');
    if (typeof data.apiServerOrigin !== 'string') console.error('No apiServerOrigin provided');
    if (typeof data.webServerOrigin !== 'string') console.error('No webServerOrigin provided');
    if (typeof data.fqdn !== 'string') console.error('No fqdn provided');
    if (typeof data.tlsCert !== 'string') console.error('No TLS cert provided');
    if (typeof data.tlsKey !== 'string') console.error('No TLS key provided');
    if (typeof data.isCustomDomain !== 'boolean') console.error('No isCustomDomain provided');
    if (typeof data.version !== 'string') console.error('No version provided');
    if (typeof data.sourceTarballUrl !== 'string') console.error('No sourceTarballUrl provided');

    if ('restoreUrl' in data) {
        if (typeof data.restoreUrl !== 'string') console.error('No restoreUrl provided');
        if (typeof data.restoreKey !== 'string') console.error('No restoreKey provided');
    }
}

// POST handler: kicks off a restore. The actual work is fire-and-forget
// (errors are only logged); the appstore polls for progress out of band.
function restore(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided'));

    if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided'));

    checkData(req.body.data);

    debug('restore: received from appstore %j', req.body);

    installer.restore(req.body, function (error) {
        if (error) console.error(error);
    });

    announce.stop(function () { }); // we got contacted; no need to announce anymore

    next(new HttpSuccess(202, { }));
}

// POST handler: kicks off a fresh install (fire-and-forget, see restore above).
function provision(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided'));

    if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided'));

    checkData(req.body.data);

    debug('provision: received from appstore %j', req.body);

    installer.provision(req.body, function (error) {
        if (error) console.error(error);
    });

    announce.stop(function () { }); // we got contacted; no need to announce anymore

    next(new HttpSuccess(202, { }));
}

// POST handler: kicks off a box code update (fire-and-forget, see restore above).
function update(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided'));

    if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided'));

    checkData(req.body.data);

    debug('update: started');

    installer.update(req.body, function (error) {
        if (error) console.error(error);
    });

    next(new HttpSuccess(202, { }));
}

// POST handler: retires this cloudron (fire-and-forget, see restore above).
function retire(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided'));

    if (typeof req.body.data.tlsCert !== 'string') console.error('No TLS cert provided');
    if (typeof req.body.data.tlsKey !== 'string') console.error('No TLS key provided');

    debug('retire: received from appstore %j', req.body);

    installer.retire(req.body, function (error) {
        if (error) console.error(error);
    });

    next(new HttpSuccess(202, {}));
}

// GET handler: streams the log file named in ?filename. With ?tail=true the
// file is followed (tail -f style) using tail-stream.
function logs(req, res, next) {
    if (!req.query.filename) return next(new HttpError(400, 'No filename provided'));
    var tail = req.query.tail === 'true';
    var stream = null;

    var stat = safe.fs.statSync(req.query.filename);

    if (!stat) return res.status(404).send('Not found');

    if (tail) {
        var tailStreamOptions = {
            beginAt: 'end',
            onMove: 'follow',
            detectTruncate: true,
            onTruncate: 'end',
            endOnError: true
        };

        stream = safe(function () { return ts.createReadStream(req.query.filename, tailStreamOptions); });
        if (!stream) return res.status(404).send(safe.error.message); // must check before dereferencing stream below
        stream.destroy = stream.end; // tail-stream closes its watchers with this special API
    } else {
        stream = fs.createReadStream(req.query.filename);
        res.set('content-length', stat.size);
    }

    stream.on('error', function (error) { res.write(error.message); res.end(); });
    res.on('close', function () { stream.destroy(); }); // stop tailing when the client disconnects
    res.status(200);
    stream.pipe(res);
}

// POST handler: asks the running box code to take a backup.
function backup(req, res, next) {
    // !! below port has to be in sync with box/config.js internalPort
    superagent.post('http://127.0.0.1:3001/api/v1/backup').end(function (error, result) {
        if (error) return next(new HttpError(500, error));
        if (result.statusCode !== 202) return next(new HttpError(500, 'trigger backup failed with ' + result.statusCode));
        next(new HttpSuccess(202, {}));
    });
}

// The update server listens on localhost only; the box code on this machine
// uses it to update itself.
function startUpdateServer(callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('Starting update server');

    var app = express();

    var router = new express.Router();

    if (process.env.NODE_ENV !== 'test') app.use(morgan('dev', { immediate: false }));

    app.use(json({ strict: true }))
       .use(router)
       .use(lastMile());

    router.post('/api/v1/installer/update', update);

    gHttpServer = http.createServer(app);
    gHttpServer.on('error', console.error);

    gHttpServer.listen(2020, '127.0.0.1', callback);
}

// The provision server is public but requires a client certificate signed by
// our CA, so only the appstore can reach it.
function startProvisionServer(callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('Starting provision server');

    var app = express();

    var router = new express.Router();

    if (process.env.NODE_ENV !== 'test') app.use(morgan('dev', { immediate: false }));

    app.use(json({ strict: true }))
       .use(router)
       .use(lastMile());

    router.post('/api/v1/installer/provision', provision);
    router.post('/api/v1/installer/restore', restore);
    router.post('/api/v1/installer/retire', retire);
    router.get('/api/v1/installer/logs', logs);
    router.post('/api/v1/installer/backup', backup);
    router.post('/api/v1/installer/update', update);

    var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs');
    var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer' : 'certs');

    var options = {
        key: fs.readFileSync(path.join(certPath, 'server.key')),
        cert: fs.readFileSync(path.join(certPath, 'server.crt')),
        ca: fs.readFileSync(path.join(caPath, 'ca.crt')),

        // request cert from client and only allow from our CA
        requestCert: true,
        rejectUnauthorized: process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0' // this is set in the tests
    };

    gHttpsServer = https.createServer(options, app);
    gHttpsServer.on('error', console.error);

    gHttpsServer.listen(process.env.NODE_ENV === 'test' ? 4443 : 886, '0.0.0.0', callback);
}

function stopProvisionServer(callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('Stopping provision server');

    if (!gHttpsServer) return callback(null);

    gHttpsServer.close(callback);
    gHttpsServer = null;
}

function stopUpdateServer(callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('Stopping update server');

    if (!gHttpServer) return callback(null);

    gHttpServer.close(callback);
    gHttpServer = null;
}

// Boots the installer: reads the appstore origin from the DigitalOcean
// metadata service, then starts announce plus both servers.
// The callback is invoked exactly once, with an error on any failure.
function start(callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('starting');

    superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) {
        if (error || result.statusCode !== 200) {
            console.error('Error getting metadata', error);
            return callback(error || new Error('Error getting metadata. status: ' + result.statusCode));
        }

        // user_data is a JSON string embedded in the metadata response
        var userData = safe.JSON.parse(result.body.user_data);
        if (!userData || typeof userData.apiServerOrigin !== 'string') return callback(new Error('Invalid user_data in metadata'));

        var apiServerOrigin = userData.apiServerOrigin;
        debug('Using apiServerOrigin from metadata: %s', apiServerOrigin);

        async.series([
            announce.start.bind(null, apiServerOrigin),
            startUpdateServer,
            startProvisionServer
        ], callback);
    });
}

function stop(callback) {
    assert.strictEqual(typeof callback, 'function');

    async.series([
        announce.stop,
        stopUpdateServer,
        stopProvisionServer
    ], callback);
}

if (require.main === module) {
    start(function (error) {
        if (error) console.error(error);
    });
}
}); + }); + + it('does not respond to restore', function (done) { + request.post(INTERNAL_SERVER_URL + '/api/v1/installer/restore').send({ }).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(404); + done(); + }); + }); + + var data = { + sourceTarballUrl: "https://foo.tar.gz", + + data: { + token: 'sometoken', + apiServerOrigin: APPSERVER_ORIGIN, + webServerOrigin: 'https://somethingelse.com', + fqdn: 'www.something.com', + tlsKey: 'key', + tlsCert: 'cert', + boxVersionsUrl: 'https://versions.json', + version: '0.1' + } + }; + + Object.keys(data).forEach(function (key) { + it('fails due to missing ' + key, function (done) { + var dataCopy = _.merge({ }, data); + delete dataCopy[key]; + + request.post(INTERNAL_SERVER_URL + '/api/v1/installer/update').send(dataCopy).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(400); + done(); + }); + }); + }); + + it('succeeds', function (done) { + request.post(INTERNAL_SERVER_URL + '/api/v1/installer/update').send(data).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(202); + done(); + }); + }); + }); + + describe('provision - announce', function () { + var failingGet = null; + + before(function (done) { + process.env.ANNOUNCE_INTERVAL = 20; + + var scope = nock(APPSERVER_ORIGIN); + failingGet = scope.get('/api/v1/boxes/' + FQDN + '/announce'); + failingGet.times(5).reply(502); + + server.start(done); + }); + + after(function (done) { + process.env.ANNOUNCE_INTERVAL = 60000; + // failingGet.removeInterceptor({ hostname: 'appserver' }); + server.stop(done); + }); + + it('sends announce request repeatedly', function (done) { + setTimeout(function () { + expect(failingGet.counter).to.be.below(6); // counter is nock update + done(); + }, 100); + }); + }); + + describe('provision - restore', function () { + var data = { + sourceTarballUrl: 'https://sourceTarballUrl', + + data: { + 
boxVersionsUrl: 'https://versions.json', + version: '0.1', + restoreUrl: 'https://restoreurl', + restoreKey: 'somebackupkey', + token: 'sometoken', + apiServerOrigin: APPSERVER_ORIGIN, + webServerOrigin: 'https://somethingelse.com', + fqdn: 'www.something.com', + tlsKey: 'key', + tlsCert: 'cert' + } + }; + + before(function (done) { + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead + server.start(done); + }); + + after(function (done) { + server.stop(done); + delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; + }); + + Object.keys(data).forEach(function (key) { + it('fails due to missing ' + key, function (done) { + var dataCopy = _.merge({ }, data); + delete dataCopy[key]; + + request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/restore').send(dataCopy).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(400); + done(); + }); + }); + }); + + it('succeeds', function (done) { + request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/restore').send(data).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(202); + done(); + }); + }); + }); + + describe('provision - provision', function () { + var data = { + sourceTarballUrl: 'https://sourceTarballUrl', + + data: { + boxVersionsUrl: 'https://versions.json', + version: '0.1', + token: 'sometoken', + apiServerOrigin: APPSERVER_ORIGIN, + webServerOrigin: 'https://somethingelse.com', + fqdn: 'www.something.com', + tlsKey: 'key', + tlsCert: 'cert' + } + }; + + before(function (done) { + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead + server.start(done); + }); + + after(function (done) { + server.stop(done); + delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; + }); + + Object.keys(data).forEach(function (key) { + it('fails due to missing ' + key, function (done) { + var dataCopy = _.merge({ }, data); + delete dataCopy[key]; + + 
request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/provision').send(dataCopy).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(400); + done(); + }); + }); + }); + + it('succeeds', function (done) { + request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/provision').send(data).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(202); + done(); + }); + }); + }); + + describe('logs', function () { + before(function (done) { + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead + server.start(done); + }); + + after(function (done) { + server.stop(done); + delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; + }); + + it('needs filename', function (done) { + request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs').end(function (error, result) { + expect(!error).to.be.ok(); + expect(result.statusCode).to.equal(400); + done(); + }); + }); + + it('returns stream for valid file', function (done) { + request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?filename=' + __filename).end(function (error, result) { + expect(!error).to.be.ok(); + expect(result.headers['content-length']).to.be('' + fs.statSync(__filename).size); + expect(result.statusCode).to.equal(200); + done(); + }); + }); + + it('returns tail stream for valid file', function (done) { + var tailFile = path.join(os.tmpdir(), 'test-tail'); + fs.writeFileSync(tailFile, 'line 1\n'); + + var res = request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?tail=true&filename=' + tailFile).end(function (error, result) { + expect(!error).to.be.ok(); + expect(result.headers['transfer-encoding']).to.be('chunked'); + expect(result.statusCode).to.equal(200); + + fs.unlinkSync(tailFile); + + done(); + }); + + // push some new log lines to trigger request.get() callback + setTimeout(function () { fs.appendFileSync(tailFile, 'line 2\n'); }, 100); + setTimeout(res.abort.bind(res), 
200); + }); + + it('returns 404 for missing file', function (done) { + request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?filename=/tmp/randomtotally').end(function (error, result) { + expect(!error).to.be.ok(); + expect(result.statusCode).to.equal(404); + done(); + }); + }); + }); + + describe('retire', function () { + var data = { + data: { + tlsKey: 'key', + tlsCert: 'cert' + } + }; + + before(function (done) { + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead + server.start(done); + }); + + after(function (done) { + server.stop(done); + delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; + }); + + Object.keys(data).forEach(function (key) { + it('fails due to missing ' + key, function (done) { + var dataCopy = _.merge({ }, data); + delete dataCopy[key]; + + request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/retire').send(dataCopy).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(400); + done(); + }); + }); + }); + + it('succeeds', function (done) { + request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/retire').send(data).end(function (error, result) { + expect(error).to.not.be.ok(); + expect(result.statusCode).to.equal(202); + done(); + }); + }); + }); +}); + From 7d30d9e867759306179aa11447f0f456bc8ee783 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 5 Aug 2015 17:36:51 +0200 Subject: [PATCH 002/234] Use shrinkwrap instead of package.json for node module cache --- images/createBoxTarball | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/images/createBoxTarball b/images/createBoxTarball index cbbfeafce..7c1d1c9e5 100755 --- a/images/createBoxTarball +++ b/images/createBoxTarball @@ -57,7 +57,7 @@ chmod "o+rx,g+rx" "${bundle_dir}" # otherwise extracted tarball director won't b echo "Checking out code [${version}] into ${bundle_dir}" (cd "${box_dir}" && git archive --format=tar ${version} | (cd "${bundle_dir}" && tar xf -)) 
-if diff "${TMPDIR}/boxtarball.cache/package.json.all" "${bundle_dir}/package.json" >/dev/null 2>&1; then +if diff "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.all" "${bundle_dir}/npm-shrinkwrap.json" >/dev/null 2>&1; then echo "Reusing dev modules from cache" cp -r "${TMPDIR}/boxtarball.cache/node_modules-all/." "${bundle_dir}/node_modules" else @@ -67,7 +67,7 @@ else echo "Caching dev dependencies" mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-all" rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-all/" - cp "${bundle_dir}/package.json" "${TMPDIR}/boxtarball.cache/package.json.all" + cp "${bundle_dir}/npm-shrinkwrap.json" "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.all" fi echo "Building webadmin assets" @@ -78,7 +78,7 @@ rm -rf "${bundle_dir}/node_modules/" rm -rf "${bundle_dir}/webadmin/src" rm -rf "${bundle_dir}/gulpfile.js" -if diff "${TMPDIR}/boxtarball.cache/package.json.prod" "${bundle_dir}/package.json" >/dev/null 2>&1; then +if diff "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.prod" "${bundle_dir}/npm-shrinkwrap.json" >/dev/null 2>&1; then echo "Reusing prod modules from cache" cp -r "${TMPDIR}/boxtarball.cache/node_modules-prod/." 
"${bundle_dir}/node_modules" else @@ -88,7 +88,7 @@ else echo "Caching prod dependencies" mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-prod" rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-prod/" - cp "${bundle_dir}/package.json" "${TMPDIR}/boxtarball.cache/package.json.prod" + cp "${bundle_dir}/npm-shrinkwrap.json" "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.prod" fi echo "Create final tarball" From c78d09df66e7b2265d2bac7b013fd990827a1ef4 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 5 Aug 2015 17:37:07 +0200 Subject: [PATCH 003/234] Do not install optional dependencies for production This was needed due to the dtrace-provider failures as optional deps for ldapjs and bunyan --- images/createBoxTarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createBoxTarball b/images/createBoxTarball index 7c1d1c9e5..f50aa5f89 100755 --- a/images/createBoxTarball +++ b/images/createBoxTarball @@ -83,7 +83,7 @@ if diff "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.prod" "${bundle_dir}/npm cp -r "${TMPDIR}/boxtarball.cache/node_modules-prod/." 
"${bundle_dir}/node_modules" else echo "Installing modules for production" - (cd "${bundle_dir}" && npm install --production) + (cd "${bundle_dir}" && npm install --production --no-optional) echo "Caching prod dependencies" mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-prod" From 7a431b9b83bc9296f5facf964c52eca5bdb0fb47 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 6 Aug 2015 14:07:07 -0700 Subject: [PATCH 004/234] 0.0.26 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index f90ad0b9d..310a9ef44 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -83,3 +83,6 @@ - Add support page - Really fix ldap issues +[0.0.26] +- Add configurePath support + From 6efb291449005ac42a4d0b7b5dd87cca6f1d414e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sat, 8 Aug 2015 19:13:13 -0700 Subject: [PATCH 005/234] 0.0.27 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 310a9ef44..008c68e8d 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -86,3 +86,6 @@ [0.0.26] - Add configurePath support +[0.0.27] +- Improved log collector + From 768654ae6381429833281a3db94459a850fc9614 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 10 Aug 2015 15:33:47 +0200 Subject: [PATCH 006/234] Version 0.0.28 changelog --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 008c68e8d..bdb4ebcf8 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -89,3 +89,7 @@ [0.0.27] - Improved log collector +[0.0.28] +- Improve app feedback +- Restyle login page +- Some bugfixes From aa80210075765202b1776806b76a31c732911deb Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 10 Aug 2015 14:56:49 -0700 Subject: [PATCH 007/234] Update base image to 15.04 --- images/digitalOceanFunctions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/images/digitalOceanFunctions.sh b/images/digitalOceanFunctions.sh index 447973373..cee5fa8e2 100644 --- a/images/digitalOceanFunctions.sh +++ b/images/digitalOceanFunctions.sh @@ -25,7 +25,7 @@ function create_droplet() { local box_size="$3" local image_region="$4" - local ubuntu_image_slug="ubuntu-14-10-x64" + local ubuntu_image_slug="ubuntu-15-04-x64" # id=12658446 local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" From c51c715cee7eb4598fa78f52f602232a98b6b975 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 10 Aug 2015 16:21:42 -0700 Subject: [PATCH 008/234] Use systemctl --- images/initializeBaseUbuntuImage.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 87c2f881d..0f16b95de 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -90,7 +90,7 @@ apt-get -y install lxc-docker-1.5.0 ln -sf /usr/bin/docker.io /usr/local/bin/docker if [ ! -f "${DOCKER_DATA_FILE}" ]; then - service docker stop + systemctl stop docker if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r); then umount -l "${aufs_mounts}" fi @@ -106,7 +106,7 @@ if [ ! 
-f "${DOCKER_DATA_FILE}" ]; then echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker mount "${DOCKER_DATA_FILE}" - service docker start + systemctl start docker # give docker sometime to start up and create iptables rules sleep 10 fi @@ -205,7 +205,7 @@ case "\$1" in # this is a hack to fix ordering of iptables-restore and docker startup iptables-restore < /etc/iptables/rules.v4 - service docker restart + systemctl restart docker DEBUG="installer*,connect-lastmile" "\${FOREVER}" start -a -l "\${FOREVER_LOG}" -o "\${INSTALLER_LOG}" -e "\${INSTALLER_LOG}" "${INSTALLER_SOURCE_DIR}/src/server.js" ;; From 926fafd7f6a8098486896a383f1dca1073deecd3 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 10 Aug 2015 18:24:36 -0700 Subject: [PATCH 009/234] Add 0.0.29 changelog --- release/CHANGES | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/release/CHANGES b/release/CHANGES index bdb4ebcf8..fda90e0f5 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -92,4 +92,7 @@ [0.0.28] - Improve app feedback - Restyle login page -- Some bugfixes + +[0.0.29] +- Update to ubuntu 15.04 + From fd74be884848d004fb335a37f9ee3203d9d506c5 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 10 Aug 2015 19:17:21 -0700 Subject: [PATCH 010/234] Update docker to 1.7.0 --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 0f16b95de..1d4ca0fe7 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -86,7 +86,7 @@ echo "==== Install docker ====" echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 apt-get update -apt-get -y install lxc-docker-1.5.0 +apt-get -y install lxc-docker-1.7.0 ln -sf /usr/bin/docker.io /usr/local/bin/docker if [ ! 
-f "${DOCKER_DATA_FILE}" ]; then From e9af9fb16b635358a3328d91aa88e8ecee6a522c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 10 Aug 2015 21:38:22 -0700 Subject: [PATCH 011/234] 0.0.30 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index fda90e0f5..843a55d07 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -96,3 +96,6 @@ [0.0.29] - Update to ubuntu 15.04 +[0.0.30] +- Move to docker 1.7 + From d1db38ba8e0b36465885d75518bc3a4a7f648f3e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 11 Aug 2015 17:01:11 -0700 Subject: [PATCH 012/234] Add 0.0.31 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 843a55d07..6b69601c6 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -99,3 +99,7 @@ [0.0.30] - Move to docker 1.7 +[0.0.31] +- WARNING: This update restarts your containers +- System processes are prioritized over apps + From bb74eef6014c388e11e9b49146aa0e1d684cbc67 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 12 Aug 2015 17:45:13 +0200 Subject: [PATCH 013/234] more changes for 0.0.31 --- release/CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/CHANGES b/release/CHANGES index 6b69601c6..844f51ff7 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -102,4 +102,4 @@ [0.0.31] - WARNING: This update restarts your containers - System processes are prioritized over apps - +- Add ldap group support From 9e0bb6ca346e5cf62d4b284d94f4fbd431527126 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 12 Aug 2015 19:04:37 -0700 Subject: [PATCH 014/234] Use latest images --- images/initializeBaseUbuntuImage.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 1d4ca0fe7..bb18a982f 100755 --- a/images/initializeBaseUbuntuImage.sh +++ 
b/images/initializeBaseUbuntuImage.sh @@ -117,25 +117,25 @@ mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 # now add the user to the docker group usermod "${USER}" -a -G docker echo "=== Pulling base docker images ===" -docker pull cloudron/base:0.3.0 +docker pull cloudron/base:0.3.1 echo "=== Pulling mysql addon image ===" -docker pull cloudron/mysql:0.3.0 +docker pull cloudron/mysql:0.3.1 echo "=== Pulling postgresql addon image ===" -docker pull cloudron/postgresql:0.3.0 +docker pull cloudron/postgresql:0.3.1 echo "=== Pulling redis addon image ===" -docker pull cloudron/redis:0.3.0 +docker pull cloudron/redis:0.3.1 echo "=== Pulling mongodb addon image ===" -docker pull cloudron/mongodb:0.3.0 +docker pull cloudron/mongodb:0.3.1 echo "=== Pulling graphite docker images ===" docker pull cloudron/graphite:0.3.1 echo "=== Pulling mail relay ===" -docker pull cloudron/mail:0.3.0 +docker pull cloudron/mail:0.3.1 echo "==== Install nginx ====" apt-get -y install nginx-full From 3557e8a125bb9b464519c12b12d6f74a8dbe2d4c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 12 Aug 2015 19:52:43 -0700 Subject: [PATCH 015/234] Use constants from the box repo --- images/createDigitalOceanImage.sh | 3 +++ images/initializeBaseUbuntuImage.sh | 17 ++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 5755a4c70..4561c6f74 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -138,6 +138,9 @@ while true; do sleep 30 done +echo "Copying INFRA_VERSION" +scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${SCRIPT_DIR}/../../box/setup/INFRA_VERSION" root@${droplet_ip}:. 
+ echo "Copying installer source" cd "${INSTALLER_DIR}" git archive --format=tar HEAD | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "cat - > /root/installer.tar" diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index bb18a982f..c31078a60 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -11,6 +11,9 @@ readonly INSTALLER_REVISION="$1" readonly DOCKER_DATA_FILE="/root/docker_data.img" readonly USER_HOME_FILE="/root/user_home.img" +readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "${SOURCE_DIR}/INFRA_VERSION" + echo "==== Create User ${USER} ====" if ! id "${USER}"; then useradd "${USER}" -m @@ -117,25 +120,25 @@ mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 # now add the user to the docker group usermod "${USER}" -a -G docker echo "=== Pulling base docker images ===" -docker pull cloudron/base:0.3.1 +docker pull "${BASE_IMAGE}" echo "=== Pulling mysql addon image ===" -docker pull cloudron/mysql:0.3.1 +docker pull "${MYSQL_IMAGE}" echo "=== Pulling postgresql addon image ===" -docker pull cloudron/postgresql:0.3.1 +docker pull "${POSTGRESQL_IMAGE}" echo "=== Pulling redis addon image ===" -docker pull cloudron/redis:0.3.1 +docker pull "${REDIS_IMAGE}" echo "=== Pulling mongodb addon image ===" -docker pull cloudron/mongodb:0.3.1 +docker pull "${MONGODB_IMAGE}" echo "=== Pulling graphite docker images ===" -docker pull cloudron/graphite:0.3.1 +docker pull "${GRAPHITE_IMAGE}" echo "=== Pulling mail relay ===" -docker pull cloudron/mail:0.3.1 +docker pull "${MAIL_IMAGE}" echo "==== Install nginx ====" apt-get -y install nginx-full From 9772cfe1f2b9dfa048d4e3149ff9a14a7033b625 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 12 Aug 2015 20:33:36 -0700 Subject: [PATCH 016/234] 0.0.32 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES 
b/release/CHANGES index 844f51ff7..1887dc534 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -103,3 +103,7 @@ - WARNING: This update restarts your containers - System processes are prioritized over apps - Add ldap group support + +[0.0.32] +- MySQL addon update + From f3cbb91527ed7a4aeb55f45e3fb3b2dcbb3bc697 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 12 Aug 2015 21:54:47 -0700 Subject: [PATCH 017/234] fetch the latest codes to generate the change log --- release/release | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release/release b/release/release index 38ffcd9e2..7e7a36b37 100755 --- a/release/release +++ b/release/release @@ -67,7 +67,7 @@ function notifyAdmins(env, releases, callback) { var oldImageRef = releases[oldVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2], newImageRef = releases[newVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2]; - var imageLogs = execSync(util.format('git log %s..%s --format=oneline', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'), + var imageLogs = execSync(util.format('git fetch && git log %s..%s --format=oneline', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'), imageStat = execSync(util.format('git diff --stat %s..%s', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'); var oldBoxRef = url.parse(releases[oldVersion].sourceTarballUrl).path.match('/box-(.*).tar.gz')[1], @@ -75,7 +75,7 @@ function notifyAdmins(env, releases, callback) { var boxRepo = path.resolve(__dirname, '../../box'); - var boxLogs = execSync(util.format('git log %s..%s --format=oneline', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'), + var boxLogs = execSync(util.format('git fetch && git log %s..%s --format=oneline', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'), boxStat = execSync(util.format('git diff --stat %s..%s', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'); var textBody = util.format( 
From d0eab70974c1ee5bb64326afca274aab71ddba28 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 13 Aug 2015 16:06:37 -0700 Subject: [PATCH 018/234] 0.0.33 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 1887dc534..590595013 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -107,3 +107,7 @@ [0.0.32] - MySQL addon update +[0.0.33] +- Fix graphs +- Fix MySQL 5.6 memory usage + From 29c3233375b9be4d9b83f576208b992a4e4c70b1 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 17 Aug 2015 10:12:09 -0700 Subject: [PATCH 019/234] 0.0.34 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 590595013..23c61c833 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -111,3 +111,6 @@ - Fix graphs - Fix MySQL 5.6 memory usage +[0.0.34] +- Correctly mark apps pending for approval + From a7729e1597a5f3fe13487538d8203309931ee699 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 18 Aug 2015 23:46:25 -0700 Subject: [PATCH 020/234] Add v0.0.35 changelog --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 23c61c833..ce318880a 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -114,3 +114,5 @@ [0.0.34] - Correctly mark apps pending for approval +[0.0.35] +- Fix ldap admin group username From 20df96b6ba9a6050f9bdbae1ae75f694b5a40ecb Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 24 Aug 2015 09:37:20 -0700 Subject: [PATCH 021/234] Add v0.0.36 changes --- release/CHANGES | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index ce318880a..8a55e8e90 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -116,3 +116,8 @@ [0.0.35] - Fix ldap admin group username + +[0.0.36] +- Fix restore without backup +- Optimize image deletion during updates + From 14333e291046e0f6fb28fb70fc50b2737dcd390d Mon Sep 17 
00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 24 Aug 2015 22:33:35 -0700 Subject: [PATCH 022/234] Enable memory accounting --- images/initializeBaseUbuntuImage.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index c31078a60..de930f8d8 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -117,6 +117,10 @@ fi # ubuntu will restore iptables from this file automatically. this is here so that docker's chain is saved to this file mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 +echo "=== Enable memory accounting ==" +sed -e 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/' -i /etc/default/grub +update-grub + # now add the user to the docker group usermod "${USER}" -a -G docker echo "=== Pulling base docker images ===" @@ -173,8 +177,8 @@ if [[ ! -f "${USER_HOME_FILE}" ]]; then fi echo "=== Install tmpreaper ===" -sudo apt-get install -y tmpreaper -sudo sed -e 's/SHOWWARNING=true/# SHOWWARNING=true/' -i /etc/tmpreaper.conf +apt-get install -y tmpreaper +sed -e 's/SHOWWARNING=true/# SHOWWARNING=true/' -i /etc/tmpreaper.conf echo "==== Extracting installer source ====" rm -rf "${INSTALLER_SOURCE_DIR}" && mkdir -p "${INSTALLER_SOURCE_DIR}" From a033480500e4e6f7ddf715cf20a34f4a73f0671b Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 24 Aug 2015 23:31:28 -0700 Subject: [PATCH 023/234] More 0.0.36 changes --- release/CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/CHANGES b/release/CHANGES index 8a55e8e90..c559ff110 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -120,4 +120,4 @@ [0.0.36] - Fix restore without backup - Optimize image deletion during updates - +- Add memory accounting From afdde9b0320cbb2a2aa09a75517abbac30b64717 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 10:46:23 -0700 Subject: [PATCH 
024/234] Disable forwarding from containers to metadata IP --- images/initializeBaseUbuntuImage.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index de930f8d8..ce4e82b30 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -114,6 +114,9 @@ if [ ! -f "${DOCKER_DATA_FILE}" ]; then sleep 10 fi +# Disable forwarding to metadata route from containers +iptables -I FORWARD -d 169.254.169.254 -j DROP + # ubuntu will restore iptables from this file automatically. this is here so that docker's chain is saved to this file mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 From ba5424c250c5eb9d83036d0eb7e6c3d6df915250 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 11:09:09 -0700 Subject: [PATCH 025/234] 0.0.37 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index c559ff110..d16f8d42c 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -121,3 +121,7 @@ - Fix restore without backup - Optimize image deletion during updates - Add memory accounting + +[0.0.37] +- Restrict access to metadata from containers + From 93042d862dcbf88debacd7b1aad6e6cd8d635735 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 11:27:49 -0700 Subject: [PATCH 026/234] Fix CHANGES --- release/CHANGES | 2 -- 1 file changed, 2 deletions(-) diff --git a/release/CHANGES b/release/CHANGES index d16f8d42c..b7046b738 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -121,7 +121,5 @@ - Fix restore without backup - Optimize image deletion during updates - Add memory accounting - -[0.0.37] - Restrict access to metadata from containers From af8f4b64c036725cf1a82cfd7730688d472aa6b7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 15:05:16 -0700 Subject: [PATCH 027/234] Use userData from metadata API --- src/installer.js | 10 --------- 
src/server.js | 54 +++++++----------------------------------------- 2 files changed, 7 insertions(+), 57 deletions(-) diff --git a/src/installer.js b/src/installer.js index f2a2bffe1..3d0665aeb 100644 --- a/src/installer.js +++ b/src/installer.js @@ -12,8 +12,6 @@ exports = module.exports = { InstallerError: InstallerError, provision: provision, - restore: restore, - update: update, retire: retire }; @@ -33,14 +31,6 @@ util.inherits(InstallerError, Error); InstallerError.INTERNAL_ERROR = 1; InstallerError.ALREADY_PROVISIONED = 2; -function update(args, callback) { - provision(args, callback); -} - -function restore(args, callback) { - provision(args, callback); -} - function spawn(tag, cmd, args, callback) { assert.strictEqual(typeof tag, 'string'); assert.strictEqual(typeof cmd, 'string'); diff --git a/src/server.js b/src/server.js index f232ab02d..71106a65f 100755 --- a/src/server.js +++ b/src/server.js @@ -51,26 +51,6 @@ function checkData(data) { } } -function restore(req, res, next) { - assert.strictEqual(typeof req.body, 'object'); - - if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided')); - - if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided')); - - checkData(req.body.data); - - debug('restore: received from appstore %j', req.body); - - installer.restore(req.body, function (error) { - if (error) console.error(error); - }); - - announce.stop(function () { }); - - next(new HttpSuccess(202, { })); -} - function provision(req, res, next) { assert.strictEqual(typeof req.body, 'object'); @@ -82,33 +62,11 @@ function provision(req, res, next) { debug('provision: received from appstore %j', req.body); - installer.provision(req.body, function (error) { - if (error) console.error(error); - }); - announce.stop(function () { }); next(new HttpSuccess(202, { })); } -function update(req, res, next) { - assert.strictEqual(typeof req.body, 'object'); - - if 
(typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided')); - - if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided')); - - checkData(req.body.data); - - debug('update: started'); - - installer.update(req.body, function (error) { - if (error) console.error(error); - }); - - next(new HttpSuccess(202, { })); -} - function retire(req, res, next) { assert.strictEqual(typeof req.body, 'object'); @@ -183,7 +141,7 @@ function startUpdateServer(callback) { .use(router) .use(lastMile()); - router.post('/api/v1/installer/update', update); + router.post('/api/v1/installer/update', provision); gHttpServer = http.createServer(app); gHttpServer.on('error', console.error); @@ -207,11 +165,11 @@ function startProvisionServer(callback) { .use(lastMile()); router.post('/api/v1/installer/provision', provision); - router.post('/api/v1/installer/restore', restore); + router.post('/api/v1/installer/restore', provision); router.post('/api/v1/installer/retire', retire); router.get ('/api/v1/installer/logs', logs); router.post('/api/v1/installer/backup', backup); - router.post('/api/v1/installer/update', update); + router.post('/api/v1/installer/update', provision); var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs'); var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? 
'../../keys/installer' : 'certs'); @@ -265,13 +223,15 @@ function start(callback) { return; } - var apiServerOrigin = JSON.parse(result.body.user_data).apiServerOrigin; + var userData = JSON.parse(result.body.user_data); + var apiServerOrigin = userData.apiServerOrigin; debug('Using apiServerOrigin from metadata: %s', apiServerOrigin); async.series([ announce.start.bind(null, apiServerOrigin), startUpdateServer, - startProvisionServer + startProvisionServer, + installer.provision.bind(null, userData) ], callback); }); } From 3b7ef4615a8d529d5f3c541e4509f0e9e1507354 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 15:06:39 -0700 Subject: [PATCH 028/234] installer does not announce anymore --- src/announce.js | 65 -------------------------------------- src/server.js | 7 +--- src/test/installer-test.js | 27 ---------------- 3 files changed, 1 insertion(+), 98 deletions(-) delete mode 100644 src/announce.js diff --git a/src/announce.js b/src/announce.js deleted file mode 100644 index 7eea6a32c..000000000 --- a/src/announce.js +++ /dev/null @@ -1,65 +0,0 @@ -/* jslint node: true */ - -'use strict'; - -var assert = require('assert'), - debug = require('debug')('installer:announce'), - fs = require('fs'), - os = require('os'), - superagent = require('superagent'); - -exports = module.exports = { - start: start, - stop: stop -}; - -var gAnnounceTimerId = null; - -var ANNOUNCE_INTERVAL = parseInt(process.env.ANNOUNCE_INTERVAL, 10) || 60000; // exported for testing - -function start(apiServerOrigin, callback) { - assert.strictEqual(typeof apiServerOrigin, 'string'); - assert.strictEqual(typeof callback, 'function'); - - if (fs.existsSync('/home/yellowtent/box')) { - debug('already provisioned, skipping announce'); - return callback(null); - } - - debug('started'); - - gAnnounceTimerId = setInterval(doAnnounce.bind(null, apiServerOrigin), ANNOUNCE_INTERVAL); - doAnnounce(apiServerOrigin); - - callback(null); -} - -function stop(callback) { - 
assert.strictEqual(typeof callback, 'function'); - - debug('Stopping announce'); - - clearInterval(gAnnounceTimerId); - gAnnounceTimerId = null; - - callback(null); -} - -function doAnnounce(apiServerOrigin) { - // On Digital Ocean, the only value which we can give a new droplet is the hostname. - // We use that value to identify the droplet by the appstore server when the droplet - // announce itself. This identifier can look different for other box providers. - var hostname = os.hostname(); - var url = apiServerOrigin + '/api/v1/boxes/' + hostname + '/announce'; - debug('box with %s.', url); - - superagent.get(url).timeout(10000).end(function (error, result) { - if (error || result.statusCode !== 200) { - debug('unable to announce to app server, try again.', error); - return; - } - - debug('success'); - }); -} - diff --git a/src/server.js b/src/server.js index 71106a65f..a92b4a7dd 100755 --- a/src/server.js +++ b/src/server.js @@ -4,8 +4,7 @@ 'use strict'; -var announce = require('./announce.js'), - assert = require('assert'), +var assert = require('assert'), async = require('async'), debug = require('debug')('installer:server'), express = require('express'), @@ -62,8 +61,6 @@ function provision(req, res, next) { debug('provision: received from appstore %j', req.body); - announce.stop(function () { }); - next(new HttpSuccess(202, { })); } @@ -228,7 +225,6 @@ function start(callback) { debug('Using apiServerOrigin from metadata: %s', apiServerOrigin); async.series([ - announce.start.bind(null, apiServerOrigin), startUpdateServer, startProvisionServer, installer.provision.bind(null, userData) @@ -240,7 +236,6 @@ function stop(callback) { assert.strictEqual(typeof callback, 'function'); async.series([ - announce.stop, stopUpdateServer, stopProvisionServer ], callback); diff --git a/src/test/installer-test.js b/src/test/installer-test.js index b53b7e40e..6d7670282 100644 --- a/src/test/installer-test.js +++ b/src/test/installer-test.js @@ -108,33 +108,6 @@ 
describe('Server', function () { }); }); - describe('provision - announce', function () { - var failingGet = null; - - before(function (done) { - process.env.ANNOUNCE_INTERVAL = 20; - - var scope = nock(APPSERVER_ORIGIN); - failingGet = scope.get('/api/v1/boxes/' + FQDN + '/announce'); - failingGet.times(5).reply(502); - - server.start(done); - }); - - after(function (done) { - process.env.ANNOUNCE_INTERVAL = 60000; - // failingGet.removeInterceptor({ hostname: 'appserver' }); - server.stop(done); - }); - - it('sends announce request repeatedly', function (done) { - setTimeout(function () { - expect(failingGet.counter).to.be.below(6); // counter is nock update - done(); - }, 100); - }); - }); - describe('provision - restore', function () { var data = { sourceTarballUrl: 'https://sourceTarballUrl', From 4978984d754718650f300dedca517c6fe9f9e350 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 15:09:35 -0700 Subject: [PATCH 029/234] Remove provision, restore, update routes --- src/server.js | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/server.js b/src/server.js index a92b4a7dd..8514f394a 100755 --- a/src/server.js +++ b/src/server.js @@ -161,12 +161,9 @@ function startProvisionServer(callback) { .use(router) .use(lastMile()); - router.post('/api/v1/installer/provision', provision); - router.post('/api/v1/installer/restore', provision); router.post('/api/v1/installer/retire', retire); router.get ('/api/v1/installer/logs', logs); router.post('/api/v1/installer/backup', backup); - router.post('/api/v1/installer/update', provision); var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs'); var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? 
'../../keys/installer' : 'certs'); From 9adf5167c90c2c143524151824575db2e1f56d48 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 15:25:03 -0700 Subject: [PATCH 030/234] Remove apiServerOrigin --- src/server.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/server.js b/src/server.js index 8514f394a..7c80e9400 100755 --- a/src/server.js +++ b/src/server.js @@ -218,8 +218,6 @@ function start(callback) { } var userData = JSON.parse(result.body.user_data); - var apiServerOrigin = userData.apiServerOrigin; - debug('Using apiServerOrigin from metadata: %s', apiServerOrigin); async.series([ startUpdateServer, From 7efa2fd0726deb1e102e29820ea84447caa569f1 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 15:54:15 -0700 Subject: [PATCH 031/234] Make update route work --- src/installer.js | 2 +- src/server.js | 23 +++++++++++++++++------ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/installer.js b/src/installer.js index 3d0665aeb..83bdb8616 100644 --- a/src/installer.js +++ b/src/installer.js @@ -78,7 +78,7 @@ function provision(args, callback) { var pargs = [ INSTALLER_CMD ]; pargs.push('--sourcetarballurl', args.sourceTarballUrl); - pargs.push('--data', JSON.stringify(args.data)); + pargs.push('--data', JSON.stringify(args)); debug('provision: calling with args %j', pargs); diff --git a/src/server.js b/src/server.js index 7c80e9400..ba793ad5b 100755 --- a/src/server.js +++ b/src/server.js @@ -50,18 +50,28 @@ function checkData(data) { } } -function provision(req, res, next) { +function update(req, res, next) { assert.strictEqual(typeof req.body, 'object'); if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided')); - if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided')); + debug('provision: received from box %j', req.body); - checkData(req.body.data); + 
superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { + if (error || result.statusCode !== 200) { + console.error('Error getting metadata', error); + return; + } - debug('provision: received from appstore %j', req.body); + var userData = JSON.parse(result.body.user_data); + userData.sourceTarballUrl = req.body.sourceTarballUrl; - next(new HttpSuccess(202, { })); + installer.provision(userData, function (error) { + if (error) console.error(error); + }); + + next(new HttpSuccess(202, { })); + }); } function retire(req, res, next) { @@ -138,7 +148,7 @@ function startUpdateServer(callback) { .use(router) .use(lastMile()); - router.post('/api/v1/installer/update', provision); + router.post('/api/v1/installer/update', update); gHttpServer = http.createServer(app); gHttpServer.on('error', console.error); @@ -211,6 +221,7 @@ function start(callback) { debug('starting'); + // FIXME: this should only happen once superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { if (error || result.statusCode !== 200) { console.error('Error getting metadata', error); From 2c7cf9faa16f815ae58b0dbca97d8091524ce093 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Tue, 25 Aug 2015 20:50:57 -0700 Subject: [PATCH 032/234] 0.0.37 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index b7046b738..25058eb1a 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -123,3 +123,6 @@ - Add memory accounting - Restrict access to metadata from containers +[0.0.37] +- Prepare for Selfhosting 1. 
part +- Use userData instead of provisioning calls From 4d607ada9dbd32cb43c4decc3b7999ecb962d813 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 22:27:45 -0700 Subject: [PATCH 033/234] Make installer a systemd service --- images/initializeBaseUbuntuImage.sh | 48 +-- npm-shrinkwrap.json | 477 ---------------------------- package.json | 1 - 3 files changed, 17 insertions(+), 509 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index ce4e82b30..3f2ea4854 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -199,41 +199,27 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production echo "==== Make the user own his home ====" chown "${USER}:${USER}" -R "/home/${USER}" -echo "==== Install init script ====" -cat > /etc/init.d/cloudron-bootstrap < /etc/systemd/user/cloudron-installer.service <&2 - exit 3 - ;; -esac +[Install] +WantedBy=multi-user.target +Alias=installer.service EOF -chmod +x /etc/init.d/cloudron-bootstrap -update-rc.d cloudron-bootstrap defaults 99 +systemctl enable cloudron-installer sync diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json index aed99cbf2..d2b1c0442 100644 --- a/npm-shrinkwrap.json +++ b/npm-shrinkwrap.json @@ -300,483 +300,6 @@ } } }, - "forever": { - "version": "0.14.1", - "from": "http://registry.npmjs.org/forever/-/forever-0.14.1.tgz", - "resolved": "http://registry.npmjs.org/forever/-/forever-0.14.1.tgz", - "dependencies": { - "colors": { - "version": "0.6.2", - "from": "https://registry.npmjs.org/colors/-/colors-0.6.2.tgz", - "resolved": "https://registry.npmjs.org/colors/-/colors-0.6.2.tgz" - }, - "cliff": { - "version": "0.1.10", - "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.10.tgz", - "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.10.tgz", - "dependencies": { - "colors": { - "version": "1.0.3", - "from": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", - "resolved": 
"https://registry.npmjs.org/colors/-/colors-1.0.3.tgz" - }, - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - } - } - }, - "flatiron": { - "version": "0.4.3", - "from": "http://registry.npmjs.org/flatiron/-/flatiron-0.4.3.tgz", - "resolved": "http://registry.npmjs.org/flatiron/-/flatiron-0.4.3.tgz", - "dependencies": { - "broadway": { - "version": "0.3.6", - "from": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", - "resolved": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", - "dependencies": { - "cliff": { - "version": "0.1.9", - "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", - "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", - "dependencies": { - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - } - } - }, - "eventemitter2": { - "version": "0.4.14", - "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", - "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" - }, - "winston": { - "version": "0.8.0", - "from": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", - "resolved": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", - "dependencies": { - "async": { - "version": "0.2.10", - "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", - "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" - }, - "cycle": { - "version": "1.0.3", - "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", - "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" - }, - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - }, - "pkginfo": { - "version": "0.3.0", - "from": 
"http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" - }, - "stack-trace": { - "version": "0.0.9", - "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", - "resolved": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" - } - } - } - } - }, - "optimist": { - "version": "0.6.0", - "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", - "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", - "dependencies": { - "wordwrap": { - "version": "0.0.2", - "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" - }, - "minimist": { - "version": "0.0.10", - "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" - } - } - }, - "prompt": { - "version": "0.2.14", - "from": "http://registry.npmjs.org/prompt/-/prompt-0.2.14.tgz", - "resolved": "http://registry.npmjs.org/prompt/-/prompt-0.2.14.tgz", - "dependencies": { - "pkginfo": { - "version": "0.3.0", - "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" - }, - "read": { - "version": "1.0.5", - "from": "http://registry.npmjs.org/read/-/read-1.0.5.tgz", - "resolved": "http://registry.npmjs.org/read/-/read-1.0.5.tgz", - "dependencies": { - "mute-stream": { - "version": "0.0.4", - "from": "http://registry.npmjs.org/mute-stream/-/mute-stream-0.0.4.tgz", - "resolved": "http://registry.npmjs.org/mute-stream/-/mute-stream-0.0.4.tgz" - } - } - }, - "revalidator": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/revalidator/-/revalidator-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/revalidator/-/revalidator-0.1.8.tgz" - } - } - }, - "director": { - "version": "1.2.7", - "from": "http://registry.npmjs.org/director/-/director-1.2.7.tgz", 
- "resolved": "http://registry.npmjs.org/director/-/director-1.2.7.tgz" - } - } - }, - "forever-monitor": { - "version": "1.5.2", - "from": "http://registry.npmjs.org/forever-monitor/-/forever-monitor-1.5.2.tgz", - "resolved": "http://registry.npmjs.org/forever-monitor/-/forever-monitor-1.5.2.tgz", - "dependencies": { - "broadway": { - "version": "0.3.6", - "from": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", - "resolved": "http://registry.npmjs.org/broadway/-/broadway-0.3.6.tgz", - "dependencies": { - "cliff": { - "version": "0.1.9", - "from": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", - "resolved": "http://registry.npmjs.org/cliff/-/cliff-0.1.9.tgz", - "dependencies": { - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - } - } - }, - "eventemitter2": { - "version": "0.4.14", - "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", - "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" - }, - "winston": { - "version": "0.8.0", - "from": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", - "resolved": "http://registry.npmjs.org/winston/-/winston-0.8.0.tgz", - "dependencies": { - "async": { - "version": "0.2.10", - "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", - "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" - }, - "cycle": { - "version": "1.0.3", - "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", - "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" - }, - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - }, - "pkginfo": { - "version": "0.3.0", - "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" - }, - "stack-trace": { - 
"version": "0.0.9", - "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", - "resolved": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" - } - } - } - } - }, - "minimatch": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", - "dependencies": { - "lru-cache": { - "version": "2.5.0", - "from": "http://registry.npmjs.org/lru-cache/-/lru-cache-2.5.0.tgz", - "resolved": "http://registry.npmjs.org/lru-cache/-/lru-cache-2.5.0.tgz" - }, - "sigmund": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz" - } - } - }, - "ps-tree": { - "version": "0.0.3", - "from": "http://registry.npmjs.org/ps-tree/-/ps-tree-0.0.3.tgz", - "resolved": "http://registry.npmjs.org/ps-tree/-/ps-tree-0.0.3.tgz", - "dependencies": { - "event-stream": { - "version": "0.5.3", - "from": "http://registry.npmjs.org/event-stream/-/event-stream-0.5.3.tgz", - "resolved": "http://registry.npmjs.org/event-stream/-/event-stream-0.5.3.tgz", - "dependencies": { - "optimist": { - "version": "0.2.8", - "from": "http://registry.npmjs.org/optimist/-/optimist-0.2.8.tgz", - "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.2.8.tgz", - "dependencies": { - "wordwrap": { - "version": "0.0.2", - "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" - } - } - } - } - } - } - }, - "watch": { - "version": "0.13.0", - "from": "http://registry.npmjs.org/watch/-/watch-0.13.0.tgz", - "resolved": "http://registry.npmjs.org/watch/-/watch-0.13.0.tgz", - "dependencies": { - "minimist": { - "version": "1.1.1", - "from": "http://registry.npmjs.org/minimist/-/minimist-1.1.1.tgz", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.1.1.tgz" - } - } - } - } - }, - "nconf": { 
- "version": "0.6.9", - "from": "http://registry.npmjs.org/nconf/-/nconf-0.6.9.tgz", - "resolved": "http://registry.npmjs.org/nconf/-/nconf-0.6.9.tgz", - "dependencies": { - "async": { - "version": "0.2.9", - "from": "http://registry.npmjs.org/async/-/async-0.2.9.tgz", - "resolved": "http://registry.npmjs.org/async/-/async-0.2.9.tgz" - }, - "ini": { - "version": "1.3.3", - "from": "http://registry.npmjs.org/ini/-/ini-1.3.3.tgz", - "resolved": "http://registry.npmjs.org/ini/-/ini-1.3.3.tgz" - }, - "optimist": { - "version": "0.6.0", - "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", - "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.0.tgz", - "dependencies": { - "wordwrap": { - "version": "0.0.2", - "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "resolved": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" - }, - "minimist": { - "version": "0.0.10", - "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" - } - } - } - } - }, - "nssocket": { - "version": "0.5.3", - "from": "http://registry.npmjs.org/nssocket/-/nssocket-0.5.3.tgz", - "resolved": "http://registry.npmjs.org/nssocket/-/nssocket-0.5.3.tgz", - "dependencies": { - "eventemitter2": { - "version": "0.4.14", - "from": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", - "resolved": "http://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz" - }, - "lazy": { - "version": "1.0.11", - "from": "http://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz", - "resolved": "http://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz" - } - } - }, - "optimist": { - "version": "0.6.1", - "from": "http://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", - "resolved": "http://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", - "dependencies": { - "wordwrap": { - "version": "0.0.2", - "from": "http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "resolved": 
"http://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz" - }, - "minimist": { - "version": "0.0.10", - "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz" - } - } - }, - "timespan": { - "version": "2.3.0", - "from": "http://registry.npmjs.org/timespan/-/timespan-2.3.0.tgz", - "resolved": "http://registry.npmjs.org/timespan/-/timespan-2.3.0.tgz" - }, - "utile": { - "version": "0.2.1", - "from": "http://registry.npmjs.org/utile/-/utile-0.2.1.tgz", - "resolved": "http://registry.npmjs.org/utile/-/utile-0.2.1.tgz", - "dependencies": { - "async": { - "version": "0.2.10", - "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", - "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" - }, - "deep-equal": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/deep-equal/-/deep-equal-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/deep-equal/-/deep-equal-1.0.0.tgz" - }, - "i": { - "version": "0.3.3", - "from": "http://registry.npmjs.org/i/-/i-0.3.3.tgz", - "resolved": "http://registry.npmjs.org/i/-/i-0.3.3.tgz" - }, - "mkdirp": { - "version": "0.5.0", - "from": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", - "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", - "dependencies": { - "minimist": { - "version": "0.0.8", - "from": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz" - } - } - }, - "ncp": { - "version": "0.4.2", - "from": "http://registry.npmjs.org/ncp/-/ncp-0.4.2.tgz", - "resolved": "http://registry.npmjs.org/ncp/-/ncp-0.4.2.tgz" - }, - "rimraf": { - "version": "2.3.2", - "from": "https://registry.npmjs.org/rimraf/-/rimraf-2.3.2.tgz", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.3.2.tgz", - "dependencies": { - "glob": { - "version": "4.5.3", - "from": "http://registry.npmjs.org/glob/-/glob-4.5.3.tgz", - "resolved": 
"http://registry.npmjs.org/glob/-/glob-4.5.3.tgz", - "dependencies": { - "inflight": { - "version": "1.0.4", - "from": "http://registry.npmjs.org/inflight/-/inflight-1.0.4.tgz", - "resolved": "http://registry.npmjs.org/inflight/-/inflight-1.0.4.tgz", - "dependencies": { - "wrappy": { - "version": "1.0.1", - "from": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz", - "resolved": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz" - } - } - }, - "inherits": { - "version": "2.0.1", - "from": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "resolved": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" - }, - "minimatch": { - "version": "2.0.4", - "from": "http://registry.npmjs.org/minimatch/-/minimatch-2.0.4.tgz", - "resolved": "http://registry.npmjs.org/minimatch/-/minimatch-2.0.4.tgz", - "dependencies": { - "brace-expansion": { - "version": "1.1.0", - "from": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.0.tgz", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.0.tgz", - "dependencies": { - "balanced-match": { - "version": "0.2.0", - "from": "https://registry.npmjs.org/balanced-match/-/balanced-match-0.2.0.tgz", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-0.2.0.tgz" - }, - "concat-map": { - "version": "0.0.1", - "from": "http://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "resolved": "http://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz" - } - } - } - } - }, - "once": { - "version": "1.3.1", - "from": "http://registry.npmjs.org/once/-/once-1.3.1.tgz", - "resolved": "http://registry.npmjs.org/once/-/once-1.3.1.tgz", - "dependencies": { - "wrappy": { - "version": "1.0.1", - "from": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz", - "resolved": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz" - } - } - } - } - } - } - } - } - }, - "winston": { - "version": "0.8.3", - "from": "http://registry.npmjs.org/winston/-/winston-0.8.3.tgz", - 
"resolved": "http://registry.npmjs.org/winston/-/winston-0.8.3.tgz", - "dependencies": { - "async": { - "version": "0.2.10", - "from": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", - "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz" - }, - "cycle": { - "version": "1.0.3", - "from": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", - "resolved": "http://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz" - }, - "eyes": { - "version": "0.1.8", - "from": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "resolved": "http://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz" - }, - "isstream": { - "version": "0.1.2", - "from": "http://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "resolved": "http://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz" - }, - "pkginfo": { - "version": "0.3.0", - "from": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/pkginfo/-/pkginfo-0.3.0.tgz" - }, - "stack-trace": { - "version": "0.0.9", - "from": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", - "resolved": "http://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz" - } - } - } - } - }, "json": { "version": "9.0.3", "from": "https://registry.npmjs.org/json/-/json-9.0.3.tgz", diff --git a/package.json b/package.json index 14c4d5bd1..9825a1949 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,6 @@ "connect-lastmile": "0.0.10", "debug": "^2.1.1", "express": "^4.11.2", - "forever": "^0.14.1", "json": "^9.0.3", "morgan": "^1.5.1", "proxy-middleware": "^0.11.0", From 04726ba697790f76178e7a93a9e9175df6475923 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 23:21:30 -0700 Subject: [PATCH 034/234] Restore iptables before docker --- images/initializeBaseUbuntuImage.sh | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 3f2ea4854..fde52fb0b 100755 --- 
a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -91,6 +91,7 @@ apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8 apt-get update apt-get -y install lxc-docker-1.7.0 ln -sf /usr/bin/docker.io /usr/local/bin/docker +systemctl enable docker if [ ! -f "${DOCKER_DATA_FILE}" ]; then systemctl stop docker @@ -199,7 +200,7 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production echo "==== Make the user own his home ====" chown "${USER}:${USER}" -R "/home/${USER}" -echo "==== Install systemd script ====" +echo "==== Install installer systemd script ====" cat > /etc/systemd/user/cloudron-installer.service < /etc/systemd/system/iptables-restore.service < Date: Tue, 25 Aug 2015 23:43:04 -0700 Subject: [PATCH 035/234] Set RemainAfterExit https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=750683 --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index fde52fb0b..8a5a714d2 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -232,6 +232,7 @@ Requires=docker.service [Service] Type=oneshot ExecStart=/sbin/iptables-restore /etc/iptables/rules.v4 +RemainAfterExit=yes [Install] WantedBy=multi-user.target From 83ef4234bcb0bec0d2d0d44571b4ed6d02bd29c8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 25 Aug 2015 23:43:42 -0700 Subject: [PATCH 036/234] Fix typo --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 8a5a714d2..434b42a8b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -201,7 +201,7 @@ echo "==== Make the user own his home ====" chown "${USER}:${USER}" -R "/home/${USER}" echo "==== Install installer systemd script ====" -cat > /etc/systemd/user/cloudron-installer.service 
< /etc/systemd/system/cloudron-installer.service < Date: Wed, 26 Aug 2015 00:07:15 -0700 Subject: [PATCH 037/234] Not sure why this is here --- images/initializeBaseUbuntuImage.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 434b42a8b..9e2a62717 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -209,9 +209,6 @@ Type=idle [Service] ExecStart="${INSTALLER_SOURCE_DIR}/src/server.js" Environment="DEBUG=installer*,connect-lastmile" -# hack to fix race between iptables-restore and docker -ExecStart=/bin/bash -c "iptables-restore < /etc/iptables/rules.v4" -ExecStart=systemctl restart docker KillMode=process Restart=on-failure From a4f77dfcd047657011c34a08c749d897c8e5c3c3 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 08:48:13 -0700 Subject: [PATCH 038/234] Create systemd service to allocate swap This can be used to make the swap creation dynamic based on the ram in the cloudron --- images/initializeBaseUbuntuImage.sh | 37 ++++++++++++++--------------- systemd/allocate-swap.sh | 29 ++++++++++++++++++++++ 2 files changed, 47 insertions(+), 19 deletions(-) create mode 100755 systemd/allocate-swap.sh diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9e2a62717..a24d72092 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -23,23 +23,6 @@ echo "=== Yellowtent base image preparation (installer revision - ${INSTALLER_RE export DEBIAN_FRONTEND=noninteractive -# Allocate two sets of swap files - one for general app usage and another for backup -# The backup swap is setup for swap on the fly by the backup scripts -echo "=== Setup swap file ===" -apps_swap_file="/apps.swap" -[[ -f "${apps_swap_file}" ]] && swapoff "${apps_swap_file}" -fallocate -l 1024m "${apps_swap_file}" -chmod 600 "${apps_swap_file}" -mkswap "${apps_swap_file}" -swapon 
"${apps_swap_file}" -echo "${apps_swap_file} none swap sw 0 0" >> /etc/fstab - -backup_swap_file="/backup.swap" -[[ -f "${backup_swap_file}" ]] && swapoff "${backup_swap_file}" -fallocate -l 1024m "${backup_swap_file}" -chmod 600 "${backup_swap_file}" -mkswap "${backup_swap_file}" - echo "==== Install project dependencies ====" apt-get update @@ -224,7 +207,6 @@ cat > /etc/systemd/system/iptables-restore.service < /etc/systemd/system/allocate-swap.service <> /etc/fstab +else + echo "Apps Swap file already exists" +fi + +if [[ ! -f "${BACKUP_SWAP_FILE}" ]]; then + echo "Creating Backup swap file" + fallocate -l 1024m "${BACKUP_SWAP_FILE}" + chmod 600 "${BACKUP_SWAP_FILE}" + mkswap "${BACKUP_SWAP_FILE}" +else + echo "Backups Swap file already exists" +fi From 3a1bfa91d16fcb1295bc0beb1c9be3b49c9d4fb5 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 09:23:30 -0700 Subject: [PATCH 039/234] Create app and data partitions dynamically --- images/initializeBaseUbuntuImage.sh | 47 +++++----------------- systemd/allocate-swap.sh | 29 -------------- systemd/box-setup.sh | 62 +++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 66 deletions(-) delete mode 100755 systemd/allocate-swap.sh create mode 100755 systemd/box-setup.sh diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a24d72092..78a8fceeb 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -8,8 +8,6 @@ readonly DATA_DIR="${USER_HOME}/data" readonly APPDATA="${DATA_DIR}/appdata" readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer" readonly INSTALLER_REVISION="$1" -readonly DOCKER_DATA_FILE="/root/docker_data.img" -readonly USER_HOME_FILE="/root/user_home.img" readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${SOURCE_DIR}/INFRA_VERSION" @@ -67,6 +65,9 @@ iptables -A INPUT -j LOGGING # last rule in INPUT chain iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix 
"IPTables Packet Dropped: " --log-level 7 iptables -A LOGGING -j DROP +echo "==== Install btrfs tools" +apt-get -y install btrfs-tools + echo "==== Install docker ====" # see http://idolstarastronomer.com/painless-docker.html echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list @@ -75,28 +76,10 @@ apt-get update apt-get -y install lxc-docker-1.7.0 ln -sf /usr/bin/docker.io /usr/local/bin/docker systemctl enable docker +systemctl start docker -if [ ! -f "${DOCKER_DATA_FILE}" ]; then - systemctl stop docker - if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r); then - umount -l "${aufs_mounts}" - fi - rm -rf /var/lib/docker - mkdir /var/lib/docker - - # create a separate 12GB fs for docker images - # dd if=/dev/zero of=/root/docker_data.img bs=1M count=12000 - apt-get -y install btrfs-tools - truncate -s 12G "${DOCKER_DATA_FILE}" - mkfs.btrfs -L DockerData "${DOCKER_DATA_FILE}" - echo "${DOCKER_DATA_FILE} /var/lib/docker btrfs loop,nosuid 0 0" >> /etc/fstab - echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker - mount "${DOCKER_DATA_FILE}" - - systemctl start docker - # give docker sometime to start up and create iptables rules - sleep 10 -fi +# give docker sometime to start up and create iptables rules +sleep 10 # Disable forwarding to metadata route from containers iptables -I FORWARD -d 169.254.169.254 -j DROP @@ -153,16 +136,6 @@ echo "==== Install collectd ===" apt-get install -y collectd collectd-utils update-rc.d -f collectd remove -echo "==== Seting up btrfs user home ===" -if [[ ! 
-f "${USER_HOME_FILE}" ]]; then - # create a separate 12GB fs for data - truncate -s 12G "${USER_HOME_FILE}" - mkfs.btrfs -L UserHome "${USER_HOME_FILE}" - echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab - mount "${USER_HOME_FILE}" - btrfs subvolume create "${USER_HOME}/data" -fi - echo "=== Install tmpreaper ===" apt-get install -y tmpreaper sed -e 's/SHOWWARNING=true/# SHOWWARNING=true/' -i /etc/tmpreaper.conf @@ -221,20 +194,20 @@ systemctl enable iptables-restore # Allocate swap files echo "==== Install iptables-restore systemd script ====" -cat > /etc/systemd/system/allocate-swap.service < /etc/systemd/system/box-setup.service <> /etc/fstab -else - echo "Apps Swap file already exists" -fi - -if [[ ! -f "${BACKUP_SWAP_FILE}" ]]; then - echo "Creating Backup swap file" - fallocate -l 1024m "${BACKUP_SWAP_FILE}" - chmod 600 "${BACKUP_SWAP_FILE}" - mkswap "${BACKUP_SWAP_FILE}" -else - echo "Backups Swap file already exists" -fi diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh new file mode 100755 index 000000000..a21a70a1b --- /dev/null +++ b/systemd/box-setup.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +set -eu -o pipefail + +readonly APPS_SWAP_FILE="/apps.swap" +readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups +readonly USER_HOME_FILE="/root/user_home.img" +readonly DOCKER_DATA_FILE="/root/docker_data.img" + +# Allocate two sets of swap files - one for general app usage and another for backup +# The backup swap is setup for swap on the fly by the backup scripts +if [[ ! -f "${APPS_SWAP_FILE}" ]]; then + physical_memory=$(free -m | awk '/Mem:/ { print $2 }') + echo "Creating Apps swap file of size ${physical_memory}m" + fallocate -l "${physical_memory}m" "${APPS_SWAP_FILE}" + chmod 600 "${APPS_SWAP_FILE}" + mkswap "${APPS_SWAP_FILE}" + swapon "${APPS_SWAP_FILE}" + echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab +else + echo "Apps Swap file already exists" +fi + +if [[ ! 
-f "${BACKUP_SWAP_FILE}" ]]; then + echo "Creating Backup swap file" + fallocate -l 1024m "${BACKUP_SWAP_FILE}" + chmod 600 "${BACKUP_SWAP_FILE}" + mkswap "${BACKUP_SWAP_FILE}" +else + echo "Backups Swap file already exists" +fi + +if [[ ! -f "${USER_HOME_FILE}" ]]; then + echo "Seting up btrfs user home" + disk_size=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') + # create a separate 12GB fs for data + truncate -s 12G "${USER_HOME_FILE}" + mkfs.btrfs -L UserHome "${USER_HOME_FILE}" + echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab + mount "${USER_HOME_FILE}" + btrfs subvolume create "${USER_HOME}/data" +else + echo "Home is already btrfs" +fi + +if [ ! -f "${DOCKER_DATA_FILE}" ]; then + echo "Settings up btrfs docker" + if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{ print $2 }' | sort -r); then + umount -l "${aufs_mounts}" + fi + rm -rf /var/lib/docker + mkdir /var/lib/docker + + # create a separate 12GB fs for docker images + truncate -s 12G "${DOCKER_DATA_FILE}" + mkfs.btrfs -L DockerData "${DOCKER_DATA_FILE}" + echo "${DOCKER_DATA_FILE} /var/lib/docker btrfs loop,nosuid 0 0" >> /etc/fstab + echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker + mount "${DOCKER_DATA_FILE}" +else + echo "Docker is already btrfs" +fi From 23a5a275f8d48fc9e84f33ce223d91309ef59daf Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 26 Aug 2015 10:54:24 -0700 Subject: [PATCH 040/234] Reread box config from args.data --- src/installer.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/installer.js b/src/installer.js index 83bdb8616..3d0665aeb 100644 --- a/src/installer.js +++ b/src/installer.js @@ -78,7 +78,7 @@ function provision(args, callback) { var pargs = [ INSTALLER_CMD ]; pargs.push('--sourcetarballurl', args.sourceTarballUrl); - pargs.push('--data', JSON.stringify(args)); + pargs.push('--data', JSON.stringify(args.data)); debug('provision: calling with args %j', pargs); From 
f0fd4ea45c98cbe6802ae29dcdbfcb02b03234a0 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 26 Aug 2015 11:03:17 -0700 Subject: [PATCH 041/234] Fixup tests after removing provisioning routes --- src/server.js | 3 +- src/test/installer-test.js | 98 -------------------------------------- 2 files changed, 2 insertions(+), 99 deletions(-) diff --git a/src/server.js b/src/server.js index ba793ad5b..c09196724 100755 --- a/src/server.js +++ b/src/server.js @@ -53,7 +53,8 @@ function checkData(data) { function update(req, res, next) { assert.strictEqual(typeof req.body, 'object'); - if (typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided')); + if (!req.body.sourceTarballUrl || typeof req.body.sourceTarballUrl !== 'string') return next(new HttpError(400, 'No sourceTarballUrl provided')); + if (!req.body.data || typeof req.body.data !== 'object') return next(new HttpError(400, 'No data provided')); debug('provision: received from box %j', req.body); diff --git a/src/test/installer-test.js b/src/test/installer-test.js index 6d7670282..262d5ead7 100644 --- a/src/test/installer-test.js +++ b/src/test/installer-test.js @@ -108,104 +108,6 @@ describe('Server', function () { }); }); - describe('provision - restore', function () { - var data = { - sourceTarballUrl: 'https://sourceTarballUrl', - - data: { - boxVersionsUrl: 'https://versions.json', - version: '0.1', - restoreUrl: 'https://restoreurl', - restoreKey: 'somebackupkey', - token: 'sometoken', - apiServerOrigin: APPSERVER_ORIGIN, - webServerOrigin: 'https://somethingelse.com', - fqdn: 'www.something.com', - tlsKey: 'key', - tlsCert: 'cert' - } - }; - - before(function (done) { - process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead - server.start(done); - }); - - after(function (done) { - server.stop(done); - delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; - }); - - Object.keys(data).forEach(function (key) { - 
it('fails due to missing ' + key, function (done) { - var dataCopy = _.merge({ }, data); - delete dataCopy[key]; - - request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/restore').send(dataCopy).end(function (error, result) { - expect(error).to.not.be.ok(); - expect(result.statusCode).to.equal(400); - done(); - }); - }); - }); - - it('succeeds', function (done) { - request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/restore').send(data).end(function (error, result) { - expect(error).to.not.be.ok(); - expect(result.statusCode).to.equal(202); - done(); - }); - }); - }); - - describe('provision - provision', function () { - var data = { - sourceTarballUrl: 'https://sourceTarballUrl', - - data: { - boxVersionsUrl: 'https://versions.json', - version: '0.1', - token: 'sometoken', - apiServerOrigin: APPSERVER_ORIGIN, - webServerOrigin: 'https://somethingelse.com', - fqdn: 'www.something.com', - tlsKey: 'key', - tlsCert: 'cert' - } - }; - - before(function (done) { - process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead - server.start(done); - }); - - after(function (done) { - server.stop(done); - delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; - }); - - Object.keys(data).forEach(function (key) { - it('fails due to missing ' + key, function (done) { - var dataCopy = _.merge({ }, data); - delete dataCopy[key]; - - request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/provision').send(dataCopy).end(function (error, result) { - expect(error).to.not.be.ok(); - expect(result.statusCode).to.equal(400); - done(); - }); - }); - }); - - it('succeeds', function (done) { - request.post(EXTERNAL_SERVER_URL + '/api/v1/installer/provision').send(data).end(function (error, result) { - expect(error).to.not.be.ok(); - expect(result.statusCode).to.equal(202); - done(); - }); - }); - }); - describe('logs', function () { before(function (done) { process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead From 
219032bbbb7e5c0033c4608b095d37b7a00dae32 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 11:30:45 -0700 Subject: [PATCH 042/234] Dynamically size the home and docker partitions --- systemd/box-setup.sh | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index a21a70a1b..18315fef2 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -7,10 +7,16 @@ readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups readonly USER_HOME_FILE="/root/user_home.img" readonly DOCKER_DATA_FILE="/root/docker_data.img" +readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }') +readonly app_count=$((${physical_memory} / 200)) # estimated app count +readonly docker_data_size=$((6 * 1024 + app_count * 500)) # 6gb base + 500m for each app +readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') +readonly disk_size_mb=$((disk_size_gb * 1024)) +readonly backup_swap=1024 + # Allocate two sets of swap files - one for general app usage and another for backup # The backup swap is setup for swap on the fly by the backup scripts if [[ ! -f "${APPS_SWAP_FILE}" ]]; then - physical_memory=$(free -m | awk '/Mem:/ { print $2 }') echo "Creating Apps swap file of size ${physical_memory}m" fallocate -l "${physical_memory}m" "${APPS_SWAP_FILE}" chmod 600 "${APPS_SWAP_FILE}" @@ -23,26 +29,13 @@ fi if [[ ! -f "${BACKUP_SWAP_FILE}" ]]; then echo "Creating Backup swap file" - fallocate -l 1024m "${BACKUP_SWAP_FILE}" + fallocate -l "${backup_swap}m" "${BACKUP_SWAP_FILE}" chmod 600 "${BACKUP_SWAP_FILE}" mkswap "${BACKUP_SWAP_FILE}" else echo "Backups Swap file already exists" fi -if [[ ! 
-f "${USER_HOME_FILE}" ]]; then - echo "Seting up btrfs user home" - disk_size=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') - # create a separate 12GB fs for data - truncate -s 12G "${USER_HOME_FILE}" - mkfs.btrfs -L UserHome "${USER_HOME_FILE}" - echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab - mount "${USER_HOME_FILE}" - btrfs subvolume create "${USER_HOME}/data" -else - echo "Home is already btrfs" -fi - if [ ! -f "${DOCKER_DATA_FILE}" ]; then echo "Settings up btrfs docker" if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{ print $2 }' | sort -r); then @@ -51,8 +44,7 @@ if [ ! -f "${DOCKER_DATA_FILE}" ]; then rm -rf /var/lib/docker mkdir /var/lib/docker - # create a separate 12GB fs for docker images - truncate -s 12G "${DOCKER_DATA_FILE}" + truncate -s "${docker_data_size}M" "${DOCKER_DATA_FILE}" mkfs.btrfs -L DockerData "${DOCKER_DATA_FILE}" echo "${DOCKER_DATA_FILE} /var/lib/docker btrfs loop,nosuid 0 0" >> /etc/fstab echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker @@ -60,3 +52,16 @@ if [ ! -f "${DOCKER_DATA_FILE}" ]; then else echo "Docker is already btrfs" fi + +if [[ ! 
-f "${USER_HOME_FILE}" ]]; then + echo "Seting up btrfs user home" + # create a separate 12GB fs for data + home_data_size=$((disk_size_mb - 2048 - docker_data_size - physical_memory - backup_swap)) + truncate -s "${home_data_size}M" "${USER_HOME_FILE}" + mkfs.btrfs -L UserHome "${USER_HOME_FILE}" + echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab + mount "${USER_HOME_FILE}" + btrfs subvolume create "${USER_HOME}/data" +else + echo "Home is already btrfs" +fi From 9b97e26b58e256bb237a39e7e45693c7f0ecbf64 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 12:15:47 -0700 Subject: [PATCH 043/234] Use fallocate everywhere --- systemd/box-setup.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 18315fef2..ea48dc2ae 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -7,17 +7,18 @@ readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups readonly USER_HOME_FILE="/root/user_home.img" readonly DOCKER_DATA_FILE="/root/docker_data.img" +# all sizes are in mb readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }') readonly app_count=$((${physical_memory} / 200)) # estimated app count readonly docker_data_size=$((6 * 1024 + app_count * 500)) # 6gb base + 500m for each app readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') -readonly disk_size_mb=$((disk_size_gb * 1024)) -readonly backup_swap=1024 +readonly disk_size=$((disk_size_gb * 1024)) +readonly backup_swap_size=1024 # Allocate two sets of swap files - one for general app usage and another for backup # The backup swap is setup for swap on the fly by the backup scripts if [[ ! 
-f "${APPS_SWAP_FILE}" ]]; then - echo "Creating Apps swap file of size ${physical_memory}m" + echo "Creating Apps swap file of size ${physical_memory}M" fallocate -l "${physical_memory}m" "${APPS_SWAP_FILE}" chmod 600 "${APPS_SWAP_FILE}" mkswap "${APPS_SWAP_FILE}" @@ -28,8 +29,8 @@ else fi if [[ ! -f "${BACKUP_SWAP_FILE}" ]]; then - echo "Creating Backup swap file" - fallocate -l "${backup_swap}m" "${BACKUP_SWAP_FILE}" + echo "Creating Backup swap file of size ${backup_swap_size}M" + fallocate -l "${backup_swap_size}m" "${BACKUP_SWAP_FILE}" chmod 600 "${BACKUP_SWAP_FILE}" mkswap "${BACKUP_SWAP_FILE}" else @@ -37,14 +38,14 @@ else fi if [ ! -f "${DOCKER_DATA_FILE}" ]; then - echo "Settings up btrfs docker" if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{ print $2 }' | sort -r); then umount -l "${aufs_mounts}" fi rm -rf /var/lib/docker mkdir /var/lib/docker - truncate -s "${docker_data_size}M" "${DOCKER_DATA_FILE}" + echo "Settings up btrfs docker of size ${docker_data_size}M" + fallocate -l "${docker_data_size}m" "${DOCKER_DATA_FILE}" mkfs.btrfs -L DockerData "${DOCKER_DATA_FILE}" echo "${DOCKER_DATA_FILE} /var/lib/docker btrfs loop,nosuid 0 0" >> /etc/fstab echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker @@ -54,10 +55,9 @@ else fi if [[ ! 
-f "${USER_HOME_FILE}" ]]; then - echo "Seting up btrfs user home" - # create a separate 12GB fs for data - home_data_size=$((disk_size_mb - 2048 - docker_data_size - physical_memory - backup_swap)) - truncate -s "${home_data_size}M" "${USER_HOME_FILE}" + home_data_size=$((disk_size - 2048 - docker_data_size - physical_memory - backup_swap_size)) + echo "Setting up btrfs user home of size ${home_data_size}M" + fallocate -l "${home_data_size}m" "${USER_HOME_FILE}" mkfs.btrfs -L UserHome "${USER_HOME_FILE}" echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab mount "${USER_HOME_FILE}" From 3f9ae5d6bf2a78e800255faa74c946ce8fae1bfe Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 12:22:32 -0700 Subject: [PATCH 044/234] refactor size calculation --- systemd/box-setup.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index ea48dc2ae..4316d0c13 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -9,17 +9,19 @@ readonly DOCKER_DATA_FILE="/root/docker_data.img" # all sizes are in mb readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }') +readonly swap_size="${physical_memory}" readonly app_count=$((${physical_memory} / 200)) # estimated app count readonly docker_data_size=$((6 * 1024 + app_count * 500)) # 6gb base + 500m for each app readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 +readonly system_size=2048 # 2 gigs for system libs # Allocate two sets of swap files - one for general app usage and another for backup # The backup swap is setup for swap on the fly by the backup scripts if [[ ! 
-f "${APPS_SWAP_FILE}" ]]; then - echo "Creating Apps swap file of size ${physical_memory}M" - fallocate -l "${physical_memory}m" "${APPS_SWAP_FILE}" + echo "Creating Apps swap file of size ${swap_size}M" + fallocate -l "${swap_size}m" "${APPS_SWAP_FILE}" chmod 600 "${APPS_SWAP_FILE}" mkswap "${APPS_SWAP_FILE}" swapon "${APPS_SWAP_FILE}" @@ -55,7 +57,7 @@ else fi if [[ ! -f "${USER_HOME_FILE}" ]]; then - home_data_size=$((disk_size - 2048 - docker_data_size - physical_memory - backup_swap_size)) + home_data_size=$((disk_size - system_size - docker_data_size - swap_size - backup_swap_size)) echo "Setting up btrfs user home of size ${home_data_size}M" fallocate -l "${home_data_size}m" "${USER_HOME_FILE}" mkfs.btrfs -L UserHome "${USER_HOME_FILE}" From d2bde5f0b189e61be41f8817608c092fbc868774 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 13:17:42 -0700 Subject: [PATCH 045/234] Leave 5G for the system --- systemd/box-setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 4316d0c13..5a31f37fa 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -15,7 +15,7 @@ readonly docker_data_size=$((6 * 1024 + app_count * 500)) # 6gb base + 500m for readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 -readonly system_size=2048 # 2 gigs for system libs +readonly system_size=5120 # 5 gigs for system libs and tmp # Allocate two sets of swap files - one for general app usage and another for backup # The backup swap is setup for swap on the fly by the backup scripts From 8d0d19132ea376c08a90b507cedcef06f6761e71 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 13:51:32 -0700 Subject: [PATCH 046/234] Add missing variable --- systemd/box-setup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 
5a31f37fa..3a4f5e8c9 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -2,6 +2,7 @@ set -eu -o pipefail +readonly USER_HOME="/home/yellowtent" readonly APPS_SWAP_FILE="/apps.swap" readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups readonly USER_HOME_FILE="/root/user_home.img" From ec7a61021f970638c2094f6c1bd153164bf1c957 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 14:29:54 -0700 Subject: [PATCH 047/234] create single btrfs partition apps and data consume space from the same btrfs partition now --- images/initializeBaseUbuntuImage.sh | 20 +++++++++++++-- systemd/box-setup.sh | 40 ++++++++--------------------- 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 78a8fceeb..66ea022e8 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -8,6 +8,8 @@ readonly DATA_DIR="${USER_HOME}/data" readonly APPDATA="${DATA_DIR}/appdata" readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer" readonly INSTALLER_REVISION="$1" +readonly USER_DATA_FILE="/root/user_data.img" +readonly USER_DATA_DIR="/home/yellowtent/data" readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${SOURCE_DIR}/INFRA_VERSION" @@ -75,10 +77,24 @@ apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8 apt-get update apt-get -y install lxc-docker-1.7.0 ln -sf /usr/bin/docker.io /usr/local/bin/docker + +echo "=== Remove existing aufs mounts ===" +systemctl stop docker +if aufs_mounts=$(grep 'aufs' /proc/mounts | awk '{ print $2 }' | sort -r); then + umount -l "${aufs_mounts}" +fi +rm -rf /var/lib/docker + +echo "=== Setup btrfs for preloading docker images ===" +fallocate -l "4096m" "${USER_DATA_FILE}" +mkfs.btrfs -L UserHome "${USER_DATA_FILE}" +echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab +mkdir -p "${USER_DATA_DIR}" && mount 
"${USER_DATA_FILE}" +echo "DOCKER_OPTS=\"-s btrfs -g ${DATA_DIR}/docker\"" >> /etc/default/docker systemctl enable docker -systemctl start docker # give docker sometime to start up and create iptables rules +systemctl start docker sleep 10 # Disable forwarding to metadata route from containers @@ -193,7 +209,7 @@ EOF systemctl enable iptables-restore # Allocate swap files -echo "==== Install iptables-restore systemd script ====" +echo "==== Install box-setup systemd script ====" cat > /etc/systemd/system/box-setup.service <> /etc/fstab - echo 'DOCKER_OPTS="-s btrfs"' >> /etc/default/docker - mount "${DOCKER_DATA_FILE}" -else - echo "Docker is already btrfs" -fi - -if [[ ! -f "${USER_HOME_FILE}" ]]; then - home_data_size=$((disk_size - system_size - docker_data_size - swap_size - backup_swap_size)) - echo "Setting up btrfs user home of size ${home_data_size}M" - fallocate -l "${home_data_size}m" "${USER_HOME_FILE}" - mkfs.btrfs -L UserHome "${USER_HOME_FILE}" - echo "${USER_HOME_FILE} ${USER_HOME} btrfs loop,nosuid 0 0" >> /etc/fstab - mount "${USER_HOME_FILE}" - btrfs subvolume create "${USER_HOME}/data" +if [[ ! 
-f "${USER_DATA_FILE}" ]]; then + home_data_size=$((disk_size - system_size - swap_size - backup_swap_size)) + echo "Resizing up btrfs user data to size ${home_data_size}M" + umount "${USER_DATA_DIR}" + fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data + mount "${USER_DATA_FILE}" + btrfs filesystem resize max "${USER_DATA_DIR}" else echo "Home is already btrfs" fi + From b51cb9d84a63d8f86e55bf5b156cc8df4cac7a33 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 15:22:53 -0700 Subject: [PATCH 048/234] Remove redundant variable --- images/initializeBaseUbuntuImage.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 66ea022e8..92f8e3e5b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -4,8 +4,6 @@ set -euv -o pipefail readonly USER=yellowtent readonly USER_HOME="/home/${USER}" -readonly DATA_DIR="${USER_HOME}/data" -readonly APPDATA="${DATA_DIR}/appdata" readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer" readonly INSTALLER_REVISION="$1" readonly USER_DATA_FILE="/root/user_data.img" @@ -90,7 +88,8 @@ fallocate -l "4096m" "${USER_DATA_FILE}" mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" -echo "DOCKER_OPTS=\"-s btrfs -g ${DATA_DIR}/docker\"" >> /etc/default/docker +mkdir -p "${USER_DATA_DIR}/docker" +echo "DOCKER_OPTS=\"-s btrfs -g ${USER_DATA_DIR}/docker\"" >> /etc/default/docker systemctl enable docker # give docker sometime to start up and create iptables rules From af8f4676badc0b4bd7d147478058425c72067534 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 15:32:40 -0700 Subject: [PATCH 049/234] systemd does not use /etc/default/docker --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 92f8e3e5b..e8f065a61 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -89,7 +89,7 @@ mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" mkdir -p "${USER_DATA_DIR}/docker" -echo "DOCKER_OPTS=\"-s btrfs -g ${USER_DATA_DIR}/docker\"" >> /etc/default/docker +sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker," -i /lib/systemd/system/docker.service systemctl enable docker # give docker sometime to start up and create iptables rules From c8b2b3413882560857862bfac3398c9f11c37e53 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 15:40:48 -0700 Subject: [PATCH 050/234] Always resize the data volume --- systemd/box-setup.sh | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 8999e9640..fc8272265 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -39,14 +39,11 @@ else echo "Backups Swap file already exists" fi -if [[ ! 
-f "${USER_DATA_FILE}" ]]; then - home_data_size=$((disk_size - system_size - swap_size - backup_swap_size)) - echo "Resizing up btrfs user data to size ${home_data_size}M" - umount "${USER_DATA_DIR}" - fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data - mount "${USER_DATA_FILE}" - btrfs filesystem resize max "${USER_DATA_DIR}" -else - echo "Home is already btrfs" -fi +echo "Resizing data volume" +home_data_size=$((disk_size - system_size - swap_size - backup_swap_size)) +echo "Resizing up btrfs user data to size ${home_data_size}M" +umount "${USER_DATA_DIR}" +fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data +mount "${USER_DATA_FILE}" +btrfs filesystem resize max "${USER_DATA_DIR}" From 5a67be2292d24400ef8150ddb624257c311b39bf Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 16:01:45 -0700 Subject: [PATCH 051/234] Change ownership only of installer/ --- images/initializeBaseUbuntuImage.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e8f065a61..572702e3d 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -167,9 +167,7 @@ apt-get install -y nodejs echo "=== Rebuilding npm packages ===" cd "${INSTALLER_SOURCE_DIR}" && npm install --production - -echo "==== Make the user own his home ====" -chown "${USER}:${USER}" -R "/home/${USER}" +chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" cat > /etc/systemd/system/cloudron-installer.service < Date: Wed, 26 Aug 2015 16:02:51 -0700 Subject: [PATCH 052/234] Start with 8gb instead --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 572702e3d..4dda43222 100755 --- 
a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -84,7 +84,7 @@ fi rm -rf /var/lib/docker echo "=== Setup btrfs for preloading docker images ===" -fallocate -l "4096m" "${USER_DATA_FILE}" +fallocate -l "8192m" "${USER_DATA_FILE}" # 8gb start mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" From 6744621415f75000b19fed67e4ff31dbaf9e59e8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 22:54:23 -0700 Subject: [PATCH 053/234] Print the detected ram and disk size --- systemd/box-setup.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index fc8272265..410b5ad1d 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -17,6 +17,10 @@ readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp +echo "Physical memory: ${physical_memory}" +echo "Estimated app count: ${app_count}" +echo "Disk size: ${disk_size}" + # Allocate two sets of swap files - one for general app usage and another for backup # The backup swap is setup for swap on the fly by the backup scripts if [[ ! 
-f "${APPS_SWAP_FILE}" ]]; then From c90e0fd21e6f00c8b5d7fca7f7b28573778f9f8f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 26 Aug 2015 23:38:01 -0700 Subject: [PATCH 054/234] do-resize resizes the disk it seems --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 4dda43222..471a0af4c 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -211,6 +211,7 @@ cat > /etc/systemd/system/box-setup.service < Date: Thu, 27 Aug 2015 18:49:52 -0700 Subject: [PATCH 055/234] Just pass the req.body through The metadata has things like restoreUrl and restoreKey which should not be passed through anyways --- src/server.js | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/src/server.js b/src/server.js index c09196724..3b4a33698 100755 --- a/src/server.js +++ b/src/server.js @@ -58,21 +58,11 @@ function update(req, res, next) { debug('provision: received from box %j', req.body); - superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { - if (error || result.statusCode !== 200) { - console.error('Error getting metadata', error); - return; - } - - var userData = JSON.parse(result.body.user_data); - userData.sourceTarballUrl = req.body.sourceTarballUrl; - - installer.provision(userData, function (error) { - if (error) console.error(error); - }); - - next(new HttpSuccess(202, { })); + installer.provision(req.body, function (error) { + if (error) console.error(error); }); + + next(new HttpSuccess(202, { })); } function retire(req, res, next) { From e94f2a95de820410c5422534c50efb40d458501a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 27 Aug 2015 18:51:02 -0700 Subject: [PATCH 056/234] Remove ununsed checkData --- src/server.js | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/src/server.js b/src/server.js index 
3b4a33698..10fdab551 100755 --- a/src/server.js +++ b/src/server.js @@ -30,26 +30,6 @@ exports = module.exports = { var gHttpsServer = null, // provision server; used for install/restore gHttpServer = null; // update server; used for updates -// 'data' is opaque. the following code exists to help debugging -function checkData(data) { - assert.strictEqual(typeof data, 'object'); - - if (typeof data.token !== 'string') console.error('No token provided'); - if (typeof data.apiServerOrigin !== 'string') console.error('No apiServerOrigin provided'); - if (typeof data.webServerOrigin !== 'string') console.error('No webServerOrigin provided'); - if (typeof data.fqdn !== 'string') console.error('No fqdn provided'); - if (typeof data.tlsCert !== 'string') console.error('No TLS cert provided'); - if (typeof data.tlsKey !== 'string') console.error('No TLS key provided'); - if (typeof data.isCustomDomain !== 'boolean') console.error('No isCustomDomain provided'); - if (typeof data.version !== 'string') console.error('No version provided'); - if (typeof data.sourceTarballUrl !== 'string') console.error('No sourceTarballUrl provided'); - - if ('restoreUrl' in data) { - if (typeof data.restoreUrl !== 'string') console.error('No restoreUrl provided'); - if (typeof data.restoreKey !== 'string') console.error('No restoreKey provided'); - } -} - function update(req, res, next) { assert.strictEqual(typeof req.body, 'object'); From 56413ecce6e686c11f694f3e9126ce7ddf47599f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 28 Aug 2015 10:46:22 -0700 Subject: [PATCH 057/234] Account for ext4 reserved space 2G - ram sawp 1G - backup swap 2G - reserved root@yellowtent:/# du -hcs bin 13M bin 13M total root@yellowtent:/# du -hcs boot 70M boot 70M total root@yellowtent:/# du -hcs etc 6.3M etc 6.3M total root@yellowtent:/# du -hcs lib 530M lib 530M total root@yellowtent:/# du -hcs var 634M var 634M total root@yellowtent:/# du -hcs root 33G root 33G total Filesystem Size Used Avail 
Use% Mounted on udev 990M 0 990M 0% /dev tmpfs 201M 960K 200M 1% /run /dev/vda1 40G 38G 0 100% / tmpfs 1001M 0 1001M 0% /dev/shm tmpfs 5.0M 0 5.0M 0% /run/lock tmpfs 1001M 0 1001M 0% /sys/fs/cgroup /dev/loop0 33G 7.9G 24G 25% /home/yellowtent/data tmpfs 201M 0 201M 0% /run/user/0 --- systemd/box-setup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 410b5ad1d..4cded1cb4 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -16,6 +16,7 @@ readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ prin readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp +readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1 echo "Physical memory: ${physical_memory}" echo "Estimated app count: ${app_count}" @@ -44,7 +45,7 @@ else fi echo "Resizing data volume" -home_data_size=$((disk_size - system_size - swap_size - backup_swap_size)) +home_data_size=$((disk_size - system_size - swap_size - backup_swap_size - ext4_reserved)) echo "Resizing up btrfs user data to size ${home_data_size}M" umount "${USER_DATA_DIR}" fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data From 21c16d20094554f847bfbbf568cdc4e46342f00a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 28 Aug 2015 11:21:43 -0700 Subject: [PATCH 058/234] 0.0.38 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 25058eb1a..e90397de6 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -126,3 +126,7 @@ [0.0.37] - Prepare for Selfhosting 1. 
part - Use userData instead of provisioning calls + +[0.0.38] +- Account for Ext4 reserved block when partitioning disk + From eab33150ad189610be88d2671478530da9fd5865 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 30 Aug 2015 17:23:47 -0700 Subject: [PATCH 059/234] 0.0.39 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index e90397de6..077e390e3 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -130,3 +130,6 @@ [0.0.38] - Account for Ext4 reserved block when partitioning disk +[0.0.39] +- Move subdomain management to the cloudron + From b73fc70ecf734caa2b316f84c0abbb711cb4f750 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sun, 30 Aug 2015 21:04:39 -0700 Subject: [PATCH 060/234] limit systemd journal size --- images/initializeBaseUbuntuImage.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 471a0af4c..a60a73ae6 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -21,10 +21,8 @@ echo "=== Yellowtent base image preparation (installer revision - ${INSTALLER_RE export DEBIAN_FRONTEND=noninteractive -echo "==== Install project dependencies ====" -apt-get update - echo "=== Upgrade ===" +apt-get update apt-get upgrade -y # Setup firewall before everything. 
Atleast docker 1.5 creates it's own chain and the -X below will remove it @@ -224,4 +222,7 @@ EOF systemctl enable box-setup +# Configure systemd +sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" -i /etc/systemd/journald.conf + sync From 70c93c7be792640e10930f3fa543df81794d2a64 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sun, 30 Aug 2015 21:10:57 -0700 Subject: [PATCH 061/234] Provision only once --- src/server.js | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/server.js b/src/server.js index 10fdab551..7d618c4ee 100755 --- a/src/server.js +++ b/src/server.js @@ -30,6 +30,21 @@ exports = module.exports = { var gHttpsServer = null, // provision server; used for install/restore gHttpServer = null; // update server; used for updates +function provision(callback) { + if (fs.existsSync('/home/yellowtent/configs/cloudron.conf')) return callback(null); // already provisioned + + superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { + if (error || result.statusCode !== 200) { + console.error('Error getting metadata', error); + return; + } + + var userData = JSON.parse(result.body.user_data); + + installer.provision(userData, callback); + }); +} + function update(req, res, next) { assert.strictEqual(typeof req.body, 'object'); @@ -192,21 +207,11 @@ function start(callback) { debug('starting'); - // FIXME: this should only happen once - superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { - if (error || result.statusCode !== 200) { - console.error('Error getting metadata', error); - return; - } - - var userData = JSON.parse(result.body.user_data); - - async.series([ - startUpdateServer, - startProvisionServer, - installer.provision.bind(null, userData) - ], callback); - }); + async.series([ + startUpdateServer, + startProvisionServer, + provision + ], callback); } function stop(callback) { From 222e6b66114d96611facc616a2458cccfe130fff 
Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 31 Aug 2015 09:32:12 -0700 Subject: [PATCH 062/234] 0.0.40 changes --- release/CHANGES | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 077e390e3..f7afa0fcd 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -133,3 +133,8 @@ [0.0.39] - Move subdomain management to the cloudron +[0.0.40] +- Add journal limit +- Fix reprovisioning on reboot +- Fix subdomain management during startup + From 77ada9c151466c1abbf61497f3048f3f175a2fae Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 31 Aug 2015 19:23:43 -0700 Subject: [PATCH 063/234] Copy upgrade flag --- release/release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/release b/release/release index 7e7a36b37..0b773b6da 100755 --- a/release/release +++ b/release/release @@ -480,7 +480,7 @@ function stage(fromEnv, toEnv) { if (error) return exit(error); var sourceTarballName = url.parse(fromReleases[latestFromVersion].sourceTarballUrl).pathname.substr(1); - var upgrade = toReleases[latestToVersion].imageId !== toImage.id; + var upgrade = fromReleases[latestFromVersion].upgrade; console.log('Copying source code tarball %s to %s'.gray, sourceTarballName, toEnv.tag); From b08a6840f5cf0976565f61707a5cdd0f8991cda7 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 31 Aug 2015 21:49:37 -0700 Subject: [PATCH 064/234] changes for 0.0.41 --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index f7afa0fcd..b63ca7b91 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -138,3 +138,5 @@ - Fix reprovisioning on reboot - Fix subdomain management during startup +[0.0.41] +- Finally bring things to a sane state From d0d0d95475f5736cb207a2540ae0dd6600361077 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sat, 5 Sep 2015 09:21:59 -0700 Subject: [PATCH 065/234] 0.0.42 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 
insertions(+) diff --git a/release/CHANGES b/release/CHANGES index b63ca7b91..c710ee488 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -140,3 +140,7 @@ [0.0.41] - Finally bring things to a sane state + +[0.0.42] +- Parallel apptask + From 59d174004ecc7df7cbd645f3e8640a8be3083f6e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 7 Sep 2015 11:19:16 -0700 Subject: [PATCH 066/234] box code has moved to systemd --- images/initializeBaseUbuntuImage.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a60a73ae6..820fc1544 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -142,9 +142,6 @@ apt-get -y install mysql-server echo "==== Install pwgen ====" apt-get -y install pwgen -echo "==== Install supervisor ====" -apt-get -y install supervisor - echo "==== Install collectd ===" apt-get install -y collectd collectd-utils update-rc.d -f collectd remove From 0a679da96836c5e7d0cb775a8228f26ee19a6ba5 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 7 Sep 2015 14:10:34 -0700 Subject: [PATCH 067/234] Type belongs to service --- admin/admin | 2 +- images/initializeBaseUbuntuImage.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/admin/admin b/admin/admin index 7e79bb534..7611efeac 100755 --- a/admin/admin +++ b/admin/admin @@ -246,7 +246,7 @@ function sshExec(ip, cmds) { function hotfixCloudron(ip, code) { var CMDS = [ - { cmd: 'supervisorctl stop all' }, + { cmd: 'supervisorctl stop all' }, // FIXME!! 
{ cmd: 'rm -rf /home/yellowtent/box/* /home/yellowtent/box/.*' }, { cmd: 'tar zxf - -C /home/yellowtent/box', stdin: fs.createReadStream(code) }, { cmd: 'cd /home/yellowtent/box && npm rebuild' }, diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 820fc1544..1b8b52253 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -168,9 +168,9 @@ echo "==== Install installer systemd script ====" cat > /etc/systemd/system/cloudron-installer.service < Date: Mon, 7 Sep 2015 14:25:13 -0700 Subject: [PATCH 068/234] stop systemd target instead of supervisor --- admin/admin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/admin/admin b/admin/admin index 7611efeac..0db70b6d8 100755 --- a/admin/admin +++ b/admin/admin @@ -246,7 +246,7 @@ function sshExec(ip, cmds) { function hotfixCloudron(ip, code) { var CMDS = [ - { cmd: 'supervisorctl stop all' }, // FIXME!! + { cmd: 'systemd stop cloudron.target' }, { cmd: 'rm -rf /home/yellowtent/box/* /home/yellowtent/box/.*' }, { cmd: 'tar zxf - -C /home/yellowtent/box', stdin: fs.createReadStream(code) }, { cmd: 'cd /home/yellowtent/box && npm rebuild' }, From 47b662be09190aa58e8417f20aae931095778041 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 7 Sep 2015 20:53:26 -0700 Subject: [PATCH 069/234] Remove unnecessary alias --- images/initializeBaseUbuntuImage.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 1b8b52253..111e9d8ea 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -178,7 +178,6 @@ Restart=on-failure [Install] WantedBy=multi-user.target -Alias=installer.service EOF systemctl enable cloudron-installer From a5d122c0b3a691dc26708b4d6657b74ba046e43a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 7 Sep 2015 22:37:20 -0700 Subject: [PATCH 070/234] Leave a note on singleshot After= 
behavior "Actually oneshot is also a bit special and that is where RemainAfterExit comes in. For oneshot, systemd waits for the process to exit before it starts any follow-up units (and with multiple ExecStarts I assume it waits for all of them). So that automatically leads to the scheme in berbae's last post. However, with RemainAfterExit, the unit remains active even though the process has exited, so this makes it look more like "normal" service with " --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 111e9d8ea..a993b1524 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -200,6 +200,7 @@ EOF systemctl enable iptables-restore # Allocate swap files +# https://bbs.archlinux.org/viewtopic.php?id=194792 ensures this runs after do-resize.service echo "==== Install box-setup systemd script ====" cat > /etc/systemd/system/box-setup.service < Date: Mon, 7 Sep 2015 23:13:14 -0700 Subject: [PATCH 071/234] 0.0.43 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index c710ee488..67f71f3c5 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -144,3 +144,6 @@ [0.0.42] - Parallel apptask +[0.0.43] +- Move to systemd + From 27e481023930acb8506a047e9481e263802d7f23 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 8 Sep 2015 10:31:02 -0700 Subject: [PATCH 072/234] 0.0.44 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 67f71f3c5..2565fad30 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -147,3 +147,6 @@ [0.0.43] - Move to systemd +[0.0.44] +- Fix apptask concurrency bug + From 2fa3a3c47e7ed4177476f016bdaaa2b8e20ad95f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 8 Sep 2015 12:58:06 -0700 Subject: [PATCH 073/234] 0.0.45 changes --- release/CHANGES | 2 
++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 2565fad30..a8fdc79bc 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -150,3 +150,5 @@ [0.0.44] - Fix apptask concurrency bug +[0.0.45] +- Retry subdomain registration From 422b65d9348d7c8a08292f0ce16a908e5702ebb4 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 9 Sep 2015 01:00:12 -0700 Subject: [PATCH 074/234] 0.0.46 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index a8fdc79bc..96622e8e7 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -152,3 +152,7 @@ [0.0.45] - Retry subdomain registration + +[0.0.46] +- Fix app update email notification + From 6fe67c93fec3570c327f75276e390cbc4fd16bd7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 9 Sep 2015 17:03:13 -0700 Subject: [PATCH 075/234] 0.0.47 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 96622e8e7..1fd89ed63 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -156,3 +156,6 @@ [0.0.46] - Fix app update email notification +[0.0.47] +- Ensure box code quits within 5 seconds + From 6243404d1d0ea1e382de35396c22464b5899001b Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Thu, 10 Sep 2015 14:39:30 +0200 Subject: [PATCH 076/234] 0.0.48 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 1fd89ed63..5cdc765ec 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -159,3 +159,6 @@ [0.0.47] - Ensure box code quits within 5 seconds +[0.0.48] +- Styling fixes +- Improved session handling From 8292e78ef26b228dd5a3f7e3732260335acdef3e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 10 Sep 2015 09:44:53 -0700 Subject: [PATCH 077/234] Move ssh to port 919 --- admin/admin | 2 +- admin/cloudronLogin | 2 +- images/createDigitalOceanImage.sh | 15 ++++----------- 
images/initializeBaseUbuntuImage.sh | 5 ++++- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/admin/admin b/admin/admin index 0db70b6d8..70af277de 100755 --- a/admin/admin +++ b/admin/admin @@ -209,7 +209,7 @@ function sshExec(ip, cmds) { var sshClient = new SshClient(); sshClient.connect({ host: ip, - port: 22, + port: 919, username: 'root', privateKey: fs.readFileSync(privateKey) }); diff --git a/admin/cloudronLogin b/admin/cloudronLogin index 4e62de8c7..8b92662fe 100755 --- a/admin/cloudronLogin +++ b/admin/cloudronLogin @@ -14,4 +14,4 @@ if [[ ! -f "${ssh_keys}" ]]; then exit 1 fi -ssh root@$1 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" +ssh root@$1 -p 919 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 4561c6f74..11eef464c 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -145,23 +145,16 @@ echo "Copying installer source" cd "${INSTALLER_DIR}" git archive --format=tar HEAD | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "cat - > /root/installer.tar" +echo "Copy over certs" +scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" + echo "Executing init script" if ! 
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then echo "Init script failed" exit 1 fi -echo "Copy over certs" -scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" -scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" - -echo "Shutting down droplet with id : ${droplet_id}" -ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail - -# wait 10 secs for actual shutdown -echo "Waiting for 10 seconds for droplet to shutdown" -sleep 30 - echo "Powering off droplet" power_off_droplet "${droplet_id}" diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a993b1524..2a849d214 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -39,7 +39,7 @@ iptables -P OUTPUT ACCEPT # NOTE: keep these in sync with src/apps.js validatePortBindings # allow ssh, http, https, ping, dns iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT -iptables -A INPUT -p tcp --dport 22 -j ACCEPT +iptables -A INPUT -p tcp --dport 919 -j ACCEPT # ssh iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,443,886 -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT @@ -63,6 +63,9 @@ iptables -A INPUT -j LOGGING # last rule in INPUT chain iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7 iptables -A LOGGING -j DROP +echo "==== Move ssh to port 919 ===" 
+sed -i "s/^Port .*/Port 919/" /etc/ssh/sshd_config + echo "==== Install btrfs tools" apt-get -y install btrfs-tools From a4d0394d1ad3e4646e508a802bb0e4d04b23903a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 10 Sep 2015 11:40:27 -0700 Subject: [PATCH 078/234] Revert "Move ssh to port 919" This reverts commit 4e4890810f3a22e7ec990cc44381a2c243044d99. This change is not done yet --- admin/admin | 2 +- admin/cloudronLogin | 2 +- images/createDigitalOceanImage.sh | 15 +++++++++++---- images/initializeBaseUbuntuImage.sh | 5 +---- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/admin/admin b/admin/admin index 70af277de..0db70b6d8 100755 --- a/admin/admin +++ b/admin/admin @@ -209,7 +209,7 @@ function sshExec(ip, cmds) { var sshClient = new SshClient(); sshClient.connect({ host: ip, - port: 919, + port: 22, username: 'root', privateKey: fs.readFileSync(privateKey) }); diff --git a/admin/cloudronLogin b/admin/cloudronLogin index 8b92662fe..4e62de8c7 100755 --- a/admin/cloudronLogin +++ b/admin/cloudronLogin @@ -14,4 +14,4 @@ if [[ ! 
-f "${ssh_keys}" ]]; then exit 1 fi -ssh root@$1 -p 919 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" +ssh root@$1 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 11eef464c..4561c6f74 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -145,16 +145,23 @@ echo "Copying installer source" cd "${INSTALLER_DIR}" git archive --format=tar HEAD | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "cat - > /root/installer.tar" -echo "Copy over certs" -scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" -scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" - echo "Executing init script" if ! 
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then echo "Init script failed" exit 1 fi +echo "Copy over certs" +scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" + +echo "Shutting down droplet with id : ${droplet_id}" +ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail + +# wait 10 secs for actual shutdown +echo "Waiting for 10 seconds for droplet to shutdown" +sleep 30 + echo "Powering off droplet" power_off_droplet "${droplet_id}" diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 2a849d214..a993b1524 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -39,7 +39,7 @@ iptables -P OUTPUT ACCEPT # NOTE: keep these in sync with src/apps.js validatePortBindings # allow ssh, http, https, ping, dns iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT -iptables -A INPUT -p tcp --dport 919 -j ACCEPT # ssh +iptables -A INPUT -p tcp --dport 22 -j ACCEPT iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,443,886 -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT @@ -63,9 +63,6 @@ iptables -A INPUT -j LOGGING # last rule in INPUT chain iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7 iptables -A LOGGING -j DROP -echo "==== Move ssh to port 919 ===" 
-sed -i "s/^Port .*/Port 919/" /etc/ssh/sshd_config - echo "==== Install btrfs tools" apt-get -y install btrfs-tools From 38884bc0e66eca938c3ec10d974a0cc5b5557345 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 10 Sep 2015 11:40:58 -0700 Subject: [PATCH 079/234] 0.0.49 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 5cdc765ec..991a1ffda 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -162,3 +162,7 @@ [0.0.48] - Styling fixes - Improved session handling + +[0.0.49] +- Fix app autoupdate logic + From 04cf382de5120e4adaa898b8217310b2f18a70d0 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 10 Sep 2015 20:45:53 -0700 Subject: [PATCH 080/234] systemctl stop --- admin/admin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/admin/admin b/admin/admin index 0db70b6d8..b976ed679 100755 --- a/admin/admin +++ b/admin/admin @@ -246,7 +246,7 @@ function sshExec(ip, cmds) { function hotfixCloudron(ip, code) { var CMDS = [ - { cmd: 'systemd stop cloudron.target' }, + { cmd: 'systemctl stop cloudron.target' }, { cmd: 'rm -rf /home/yellowtent/box/* /home/yellowtent/box/.*' }, { cmd: 'tar zxf - -C /home/yellowtent/box', stdin: fs.createReadStream(code) }, { cmd: 'cd /home/yellowtent/box && npm rebuild' }, From 11bf39374c355c04a8e9c1fed7c2fb3eec353b2d Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 14 Sep 2015 12:37:39 +0200 Subject: [PATCH 081/234] 0.0.50 changes --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 991a1ffda..30b7c2b41 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -166,3 +166,5 @@ [0.0.49] - Fix app autoupdate logic +[0.0.50] +- Use domainmanagement via CaaS From 02b4990cb148f1fdc0bc2792d174b4df9ada0db7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 14 Sep 2015 12:24:22 -0700 Subject: [PATCH 082/234] Version 0.0.51 changes --- release/CHANGES | 4 ++++ 1 
file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 30b7c2b41..c95c141c5 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -168,3 +168,7 @@ [0.0.50] - Use domainmanagement via CaaS + +[0.0.51] +- Fix memory management + From 255422d5be5ccedc16be6dbd312fe3fc639069d8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 14 Sep 2015 17:28:31 -0700 Subject: [PATCH 083/234] 0.0.52 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index c95c141c5..41bc6f52d 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -172,3 +172,6 @@ [0.0.51] - Fix memory management +[0.0.52] +- Restrict addons memory +- Get nofication about container OOMs From e7a21c821ec60da43e9394515cc93ae3937c7573 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 14 Sep 2015 22:20:13 -0700 Subject: [PATCH 084/234] 0.0.53 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 41bc6f52d..6c959d7ad 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -175,3 +175,7 @@ [0.0.52] - Restrict addons memory - Get nofication about container OOMs + +[0.0.53] +- Add retry to subdomain logic + From cfdfb9a907de2c8e744f9fc1160bcdb7bef1e923 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 09:48:05 -0700 Subject: [PATCH 085/234] release: add edit command --- release/release | 114 +++++++++++++++++++++++++++++++----------------- 1 file changed, 73 insertions(+), 41 deletions(-) diff --git a/release/release b/release/release index 0b773b6da..1c88edac1 100755 --- a/release/release +++ b/release/release @@ -5,22 +5,23 @@ require('supererror')({ splatchError: true }); require('colors'); -var superagent = require('superagent'), +var assert = require('assert'), async = require('async'), - safe = require('safetydance'), AWS = require('aws-sdk'), - yesno = require('yesno'), - Table = require('easy-table'), - program = 
require('commander'), - semver = require('semver'), - util = require('util'), - versionsFormat = require('./versionsformat.js'), execSync = require('child_process').execSync, + os = require('os'), parseChangelog = require('./parsechangelog.js').parse, - url = require('url'), path = require('path'), postmark = require('postmark')(process.env.POSTMARK_API_KEY_TOOLS), - assert = require('assert'); + program = require('commander'), + safe = require('safetydance'), + semver = require('semver'), + superagent = require('superagent'), + Table = require('easy-table'), + url = require('url'), + util = require('util'), + versionsFormat = require('./versionsformat.js'), + yesno = require('yesno'); var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; @@ -149,6 +150,32 @@ function newRelease(options) { verifyAndUpload(env, releases, exit); } +function edit(options) { + var env = ENVIRONMENTS[options.env]; + if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); + +console.log('Getting ', env.url); + superagent.get(env.url).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + var oldContents = result.type === 'application/json' ? 
JSON.stringify(result.body, null, 4) : result.text; + var tmpfile = path.join(os.tmpdir(), 'versions.json'); + safe.fs.writeFileSync(tmpfile, oldContents); + + var editor = require('child_process').spawn(process.env.EDITOR || 'vim', [tmpfile], {stdio: 'inherit'}); + editor.on('error', exit); + editor.on('exit', function () { + var newContents = safe.fs.readFileSync(tmpfile, 'utf8'); + if (!newContents || newContents.trim().length === 0 || newContents === oldContents) return exit('Unchanged'); + + var releases = safe.JSON.parse(newContents); + if (!releases) exit(new Error(options.file + ' has invalid json :' + safe.error.message)); + + verifyAndUpload(env, releases, exit); + }); + }); +} + function createRelease(options) { var env = ENVIRONMENTS[options.env]; if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); @@ -519,26 +546,6 @@ function stage(fromEnv, toEnv) { program.version('0.0.1'); -program.command('create') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--code ', 'Source code url') - .option('--image ', 'Image id') - .option('--changelog ', 'Changelog') - .option('--upgrade', 'Set the upgrade flag') - .description('Create a new release') - .action(createRelease); - -program.command('revert') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('Revert the last release. Use with care') - .action(function (options) { options.revert = true; createRelease(options); }); - -program.command('new') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--file ', 'Upload file as versions.json') - .description('Upload a new versions.json') - .action(newRelease); - program.command('amend') .option('--env ', 'Environment (dev/staging/prod)', 'dev') .option('--code ', 'Source code url') @@ -548,10 +555,19 @@ program.command('amend') .description('Amend last release. 
Use with care') .action(function (options) { options.amend = true; createRelease(options); }); -program.command('rerelease') +program.command('create') .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('Make a new release, same as the last release') - .action(function (options) { options.rerelease = true; createRelease(options); }); + .option('--code ', 'Source code url') + .option('--image ', 'Image id') + .option('--changelog ', 'Changelog') + .option('--upgrade', 'Set the upgrade flag') + .description('Create a new release') + .action(createRelease); + +program.command('edit') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Edit and upload versions.json') + .action(edit); program.command('list') .option('--raw', 'Show raw json') @@ -559,24 +575,40 @@ program.command('list') .description('List the releases file') .action(listRelease); -program.command('sync') - .option('--env ', 'Environment (dev/staging)', 'dev') - .description('Sync the specified env with the parent env (prod -> staging or staging -> dev)') - .action(sync); +program.command('new') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .option('--file ', 'Upload file as versions.json') + .description('Upload a new versions.json') + .action(newRelease); + +program.command('publish') + .description('Publish latest staging version to production') + .action(stage.bind(null, ENVIRONMENTS['staging'], ENVIRONMENTS['prod'])); + +program.command('rerelease') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Make a new release, same as the last release') + .action(function (options) { options.rerelease = true; createRelease(options); }); + +program.command('revert') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Revert the last release. 
Use with care') + .action(function (options) { options.revert = true; createRelease(options); }); program.command('stage') .description('Stage latest dev version to staging') .action(stage.bind(null, ENVIRONMENTS['dev'], ENVIRONMENTS['staging'])); +program.command('sync') + .option('--env ', 'Environment (dev/staging)', 'dev') + .description('Sync the specified env with the parent env (prod -> staging or staging -> dev)') + .action(sync); + program.command('touch') .option('--env ', 'Environment (dev/staging/prod)', 'dev') .description('Touch the releases file') .action(touchRelease); -program.command('publish') - .description('Publish latest staging version to production') - .action(stage.bind(null, ENVIRONMENTS['staging'], ENVIRONMENTS['prod'])); - program.parse(process.argv); if (!process.argv.slice(2).length) { From 31b4923eb2a6ed9c8d8dc08785743b245e96c185 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 10:33:36 -0700 Subject: [PATCH 086/234] better output --- release/release | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/release/release b/release/release index 1c88edac1..61fc6d2d4 100755 --- a/release/release +++ b/release/release @@ -290,11 +290,11 @@ function listRelease(options) { for (var release in result.body) { t.cell('Release', release); t.cell('Image ID', result.body[release].imageId + (result.body[release].upgrade ? 
'*' : '')); - t.cell('Image Name', result.body[release].imageName); + t.cell('Image Name', result.body[release].imageName.match(/box-dev-([\w]+)-.*/)[1]); t.cell('Date', result.body[release].date); - t.cell('Author', result.body[release].author); + t.cell('Author', result.body[release].author.split(' ')[0]); t.cell('Next', result.body[release].next); - t.cell('Source', result.body[release].sourceTarballUrl.slice(result.body[release].sourceTarballUrl.lastIndexOf('/') + 1)); + t.cell('Source', result.body[release].sourceTarballUrl.match(/\/box-(.*).tar.gz/)[1].slice(0, 7)); t.newRow(); } From 11592279e239a008709b408fed39681104e3906c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 10:41:36 -0700 Subject: [PATCH 087/234] fix regexp for non-dev --- release/release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/release b/release/release index 61fc6d2d4..600da947c 100755 --- a/release/release +++ b/release/release @@ -290,7 +290,7 @@ function listRelease(options) { for (var release in result.body) { t.cell('Release', release); t.cell('Image ID', result.body[release].imageId + (result.body[release].upgrade ? 
'*' : '')); - t.cell('Image Name', result.body[release].imageName.match(/box-dev-([\w]+)-.*/)[1]); + t.cell('Image Name', result.body[release].imageName.match(/box-(prod|staging|dev)-([\w]+)-.*/)[2]); t.cell('Date', result.body[release].date); t.cell('Author', result.body[release].author.split(' ')[0]); t.cell('Next', result.body[release].next); From 3f064322e461b89ba26cdce57eecb729b38f05ee Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 11:03:21 -0700 Subject: [PATCH 088/234] strip unreachable releases when processing --- package.json | 1 + release/release | 40 +++++++++++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/package.json b/package.json index 9825a1949..2b64638d4 100644 --- a/package.json +++ b/package.json @@ -40,6 +40,7 @@ "semver": "^4.3.0", "ssh2": "^0.4.6", "supererror": "^0.6.0", + "underscore": "^1.8.3", "yesno": "0.0.1" }, "scripts": { diff --git a/release/release b/release/release index 600da947c..a063af506 100755 --- a/release/release +++ b/release/release @@ -21,7 +21,8 @@ var assert = require('assert'), url = require('url'), util = require('util'), versionsFormat = require('./versionsformat.js'), - yesno = require('yesno'); + yesno = require('yesno'), + _ = require('underscore'); var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; @@ -58,6 +59,20 @@ function exit(error) { process.exit(error ? 
1 : 0); } +function stripUnreachable(releases) { + var reachableVersions = [ ]; + var curVersion = '0.0.1'; + + while (true) { + reachableVersions.push(curVersion); + var nextVersion = releases[curVersion].next; + if (!nextVersion) break; + curVersion = nextVersion; + } + + return _.pick(releases, reachableVersions); +} + function notifyAdmins(env, releases, callback) { console.log('Notifying admins about new release'.gray); @@ -154,7 +169,6 @@ function edit(options) { var env = ENVIRONMENTS[options.env]; if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); -console.log('Getting ', env.url); superagent.get(env.url).end(function (error, result) { if (error || result.error) return exit(error || result.error); @@ -201,6 +215,9 @@ function createRelease(options) { if (!releases) exit(new Error('versions.json is not valid JSON')); + var strippedReleases = stripUnreachable(releases); + var lastReachableVersion = Object.keys(strippedReleases).sort(semver.rcompare)[0]; + var sortedVersions = Object.keys(releases).sort(semver.rcompare); var lastVersion = sortedVersions[0]; @@ -215,11 +232,11 @@ function createRelease(options) { } var newVersion = options.amend ? lastVersion : semver.inc(lastVersion, 'patch'); - releases[lastVersion].next = newVersion; + releases[lastReachableVersion].next = newVersion; - var newImageId = options.image ? parseInt(options.image, 10) : releases[lastVersion].imageId; - var sourceTarballUrl = options.code || releases[lastVersion].sourceTarballUrl; - var upgrade = options.upgrade || (releases[lastVersion].imageId !== newImageId); + var newImageId = options.image ? 
parseInt(options.image, 10) : releases[lastReachableVersion].imageId; + var sourceTarballUrl = options.code || releases[lastReachableVersion].sourceTarballUrl; + var upgrade = options.upgrade || (releases[lastReachableVersion].imageId !== newImageId); // check if we have a changelog otherwise var changelog = parseChangelog(newVersion); @@ -311,7 +328,9 @@ function touchRelease(options, callback) { superagent.get(env.url).end(function (error, result) { if (error || result.error) return exit(error || result.error); - var latestVersion = Object.keys(result.body).sort(semver.rcompare)[0]; + var strippedReleases = stripUnreachable(result.body); + + var latestVersion = Object.keys(strippedReleases).sort(semver.rcompare)[0]; result.body[latestVersion].date = (new Date()).toString(); verifyAndUpload(env, result.body, exit); @@ -485,8 +504,11 @@ function stage(fromEnv, toEnv) { var toReleases = result.type === 'application/json' ? result.body : safe.JSON.parse(result.text); if (!toReleases) exit(new Error('versions.json is not valid JSON')); - var latestFromVersion = Object.keys(fromReleases).sort(semver.rcompare)[0]; - var latestToVersion = Object.keys(toReleases).sort(semver.rcompare)[0]; + var strippedFromReleases = stripUnreachable(fromReleases); + var strippedToReleases = stripUnreachable(toReleases); + + var latestFromVersion = Object.keys(strippedFromReleases).sort(semver.rcompare)[0]; + var latestToVersion = Object.keys(strippedToReleases).sort(semver.rcompare)[0]; var nextVersion = semver.inc(latestToVersion, 'patch'); console.log('Releasing version %s to %s'.gray, nextVersion, toEnv.tag); From d99720258a0624a93eab592668130f06674c7ee6 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 11:17:59 -0700 Subject: [PATCH 089/234] gray out unreachable releases --- release/release | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/release/release b/release/release index a063af506..eb1d1ca74 100755 --- a/release/release +++ 
b/release/release @@ -302,10 +302,12 @@ function listRelease(options) { exit(null); } + var strippedReleases = stripUnreachable(result.body); + var t = new Table(); for (var release in result.body) { - t.cell('Release', release); + t.cell('Release', release in strippedReleases ? release.white : release.gray); t.cell('Image ID', result.body[release].imageId + (result.body[release].upgrade ? '*' : '')); t.cell('Image Name', result.body[release].imageName.match(/box-(prod|staging|dev)-([\w]+)-.*/)[2]); t.cell('Date', result.body[release].date); From ec235eafe8f93939e816fc01b86c2518efeff28a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 11:25:03 -0700 Subject: [PATCH 090/234] fix staging with stripped releases --- release/release | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/release/release b/release/release index eb1d1ca74..7dfca62eb 100755 --- a/release/release +++ b/release/release @@ -506,14 +506,11 @@ function stage(fromEnv, toEnv) { var toReleases = result.type === 'application/json' ? 
result.body : safe.JSON.parse(result.text); if (!toReleases) exit(new Error('versions.json is not valid JSON')); - var strippedFromReleases = stripUnreachable(fromReleases); - var strippedToReleases = stripUnreachable(toReleases); - - var latestFromVersion = Object.keys(strippedFromReleases).sort(semver.rcompare)[0]; - var latestToVersion = Object.keys(strippedToReleases).sort(semver.rcompare)[0]; + var latestFromVersion = Object.keys(fromReleases).sort(semver.rcompare)[0]; + var latestToVersion = Object.keys(toReleases).sort(semver.rcompare)[0]; var nextVersion = semver.inc(latestToVersion, 'patch'); - console.log('Releasing version %s to %s'.gray, nextVersion, toEnv.tag); + console.log('Releasing version %s to %s'.gray, nextVersion , toEnv.tag); // check if we even have a new version to stage if (latestFromVersion === latestToVersion) exit(util.format('No new version on %s to stage.', fromEnv.tag)); @@ -522,7 +519,9 @@ function stage(fromEnv, toEnv) { var changelog = parseChangelog(nextVersion); if (changelog.length === 0) exit(new Error('No changelog found for version ' + nextVersion)); - var latestFromImageName = fromReleases[latestFromVersion].imageName; + var strippedFromReleases = stripUnreachable(fromReleases); + var latestReachableFromVersion = Object.keys(strippedFromReleases).sort(semver.rcompare)[0]; + var latestFromImageName = fromReleases[latestReachableFromVersion].imageName; var latestFromImageRevision = new RegExp('box-' + fromEnv.tag + '-([a-z,0-9.]+)-.*').exec(latestFromImageName)[1]; if (!latestFromImageRevision) exit('Unable to determine image revision'); @@ -530,8 +529,8 @@ function stage(fromEnv, toEnv) { getImageByRevision(toEnv, latestFromImageRevision, function (error, toImage) { if (error) return exit(error); - var sourceTarballName = url.parse(fromReleases[latestFromVersion].sourceTarballUrl).pathname.substr(1); - var upgrade = fromReleases[latestFromVersion].upgrade; + var sourceTarballName = 
url.parse(fromReleases[latestReachableFromVersion].sourceTarballUrl).pathname.substr(1); + var upgrade = fromReleases[latestReachableFromVersion].upgrade; console.log('Copying source code tarball %s to %s'.gray, sourceTarballName, toEnv.tag); @@ -544,7 +543,9 @@ function stage(fromEnv, toEnv) { execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); - toReleases[latestToVersion].next = nextVersion; + var strippedToReleases = stripUnreachable(toReleases); + var latestReachableToVersion = Object.keys(strippedToReleases).sort(semver.rcompare)[0]; + toReleases[latestReachableToVersion].next = nextVersion; toReleases[nextVersion] = { imageId: toImage.id, imageName: toImage.name, From e89b4a151e4cbe541c7c5a51d072fc579657d32b Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 11:57:50 -0700 Subject: [PATCH 091/234] 0.0.52 is folded into 0.0.53 --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 6c959d7ad..6f4ec9bc8 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -177,5 +177,7 @@ - Get nofication about container OOMs [0.0.53] +- Restrict addons memory +- Get nofication about container OOMs - Add retry to subdomain logic From 01889c45a20a5a67022e86f3dea22091bcc95c04 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 14:04:30 -0700 Subject: [PATCH 092/234] Fix typo --- release/CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/CHANGES b/release/CHANGES index 6f4ec9bc8..6acacf874 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -178,6 +178,6 @@ [0.0.53] - Restrict addons memory -- Get nofication about container OOMs +- Get notification about container OOMs - Add retry to subdomain logic From 5523c2d34a576b57155c6d45f815795fa2b492c8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 14:29:16 -0700 Subject: [PATCH 093/234] Disable forwarding to syslog --- images/initializeBaseUbuntuImage.sh | 4 
+++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a993b1524..1a632b1a8 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -220,6 +220,8 @@ EOF systemctl enable box-setup # Configure systemd -sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" -i /etc/systemd/journald.conf +sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" \ + -e "s/^#ForwardToSyslog=/ForwardToSyslog=no/" \ + -i /etc/systemd/journald.conf sync From cb73218dfe1ef879b24d0b6e7e252c14b998e7ac Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Sep 2015 14:32:47 -0700 Subject: [PATCH 094/234] Disable rsyslog --- images/initializeBaseUbuntuImage.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 1a632b1a8..fc48d08f0 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -219,7 +219,8 @@ EOF systemctl enable box-setup -# Configure systemd +# Configure journald logging +systemctl disable rsyslog.service sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" \ -e "s/^#ForwardToSyslog=/ForwardToSyslog=no/" \ -i /etc/systemd/journald.conf From 4b5ac679932bac0b74ffd53ee36ad2b40018750e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 16 Sep 2015 09:48:47 -0700 Subject: [PATCH 095/234] Revert "Disable rsyslog" This reverts commit 3c5db59de2b6c2ef8891ecba54496335b5e2d55f. Don't revert this. Maybe some system services use this. 
--- images/initializeBaseUbuntuImage.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index fc48d08f0..1a632b1a8 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -219,8 +219,7 @@ EOF systemctl enable box-setup -# Configure journald logging -systemctl disable rsyslog.service +# Configure systemd sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" \ -e "s/^#ForwardToSyslog=/ForwardToSyslog=no/" \ -i /etc/systemd/journald.conf From e328ec23823379dceb935779549c3b79e9015530 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 16 Sep 2015 10:35:59 -0700 Subject: [PATCH 096/234] 0.0.54 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 6acacf874..a35f88b5b 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -181,3 +181,6 @@ - Get notification about container OOMs - Add retry to subdomain logic +[0.0.54] +- OAuth Proxy now uses internal port forwarding + From 0cd56f4d4c6722c315154896d834b8916d84f7ed Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 16 Sep 2015 13:17:04 -0700 Subject: [PATCH 097/234] configure cloudron to use UTC local timezone should be tracked by the webadmin/box code --- images/initializeBaseUbuntuImage.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 1a632b1a8..7e1a2989d 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -225,3 +225,8 @@ sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" \ -i /etc/systemd/journald.conf sync + +# Configure time +sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf +timedatectl set-ntp 1 +timedatectl set-timezone UTC From 0f2435c30867b66eba99b433ba38a8e3b974946b Mon Sep 17 00:00:00 2001 
From: Girish Ramakrishnan Date: Wed, 16 Sep 2015 17:02:22 -0700 Subject: [PATCH 098/234] Version 0.0.55 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index a35f88b5b..c24584c33 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -184,3 +184,6 @@ [0.0.54] - OAuth Proxy now uses internal port forwarding +[0.0.55] +- Setup cloudron timezone based on droplet region + From 4bb017b7404cb19d6776e76200a917f0d0598f10 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 12:11:14 -0700 Subject: [PATCH 099/234] verify next version exists --- release/versionsformat.js | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/release/versionsformat.js b/release/versionsformat.js index 290907018..1cb91dca5 100644 --- a/release/versionsformat.js +++ b/release/versionsformat.js @@ -20,25 +20,31 @@ function verify(versionsJson) { var sortedVersions = Object.keys(versionsJson).sort(semver.compare); for (var i = 0; i < sortedVersions.length; i++) { var version = sortedVersions[i]; - if (typeof versionsJson[version].imageId !== 'number') return new Error('version ' + version + ' does not have proper imageId'); + var versionInfo = versionsJson[version]; - if (typeof versionsJson[version].imageName !== 'string' || !versionsJson[version].imageName.length) return new Error('version ' + version + ' does not have proper imageName'); + if (typeof versionInfo.imageId !== 'number') return new Error('version ' + version + ' does not have proper imageId'); - if ('changeLog' in versionsJson[version] && !util.isArray(versionsJson[version].changeLog)) return new Error('version ' + version + ' does not have proper changeLog'); + if (typeof versionInfo.imageName !== 'string' || !versionInfo.imageName.length) return new Error('version ' + version + ' does not have proper imageName'); - if (typeof versionsJson[version].date !== 'string' || ((new 
Date(versionsJson[version].date)).toString() === 'Invalid Date')) return new Error('invalid date or missing date'); + if ('changeLog' in versionsJson[version] && !util.isArray(versionInfo.changeLog)) return new Error('version ' + version + ' does not have proper changeLog'); - if (versionsJson[version].next !== null && typeof versionsJson[version].next !== 'string') return new Error('version ' + version + ' does not have proper next'); + if (typeof versionInfo.date !== 'string' || ((new Date(versionInfo.date)).toString() === 'Invalid Date')) return new Error('invalid date or missing date'); - if (typeof versionsJson[version].sourceTarballUrl !== 'string') return new Error('version ' + version + ' does not have proper sourceTarballUrl'); + if (versionInfo.next !== null) { + if (typeof versionInfo.next !== 'string') return new Error('version ' + version + ' does not have "string" next'); + if (!semver.valid(versionInfo.next)) return new Error('version ' + version + ' has non-semver next'); + if (!(versionInfo.next in versionsJson)) return new Error('version ' + version + ' points to non-existent version'); + } - if ('author' in versionsJson[version] && typeof versionsJson[version].author !== 'string') return new Error('author must be a string'); + if (typeof versionInfo.sourceTarballUrl !== 'string') return new Error('version ' + version + ' does not have proper sourceTarballUrl'); - var tarballUrl = url.parse(versionsJson[version].sourceTarballUrl); + if ('author' in versionsJson[version] && typeof versionInfo.author !== 'string') return new Error('author must be a string'); + + var tarballUrl = url.parse(versionInfo.sourceTarballUrl); if (tarballUrl.protocol !== 'https:') return new Error('sourceTarballUrl must be https'); if (!/.tar.gz$/.test(tarballUrl.path)) return new Error('sourceTarballUrl must be tar.gz'); - var nextVersion = versionsJson[version].next; + var nextVersion = versionInfo.next; // despite having the 'next' field, the appstore code currently 
relies on all versions being sorted based on semver.compare (see boxversions.js) if (nextVersion && semver.gt(version, nextVersion)) return new Error('next version cannot be less than current @' + version); } From 44ff676eef02ebf14a13b4e5c9b046aff4635803 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 13:48:20 -0700 Subject: [PATCH 100/234] store dates as iso strings --- release/release | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/release/release b/release/release index 7dfca62eb..524f41161 100755 --- a/release/release +++ b/release/release @@ -252,7 +252,7 @@ function createRelease(options) { imageName: result.body.image.name, changelog: changelog, upgrade: upgrade, - date: (new Date()).toString(), + date: (new Date()).toISOString(), author: username + ' <' + email + '>', next: null }; @@ -333,7 +333,7 @@ function touchRelease(options, callback) { var strippedReleases = stripUnreachable(result.body); var latestVersion = Object.keys(strippedReleases).sort(semver.rcompare)[0]; - result.body[latestVersion].date = (new Date()).toString(); + result.body[latestVersion].date = (new Date()).toISOString(); verifyAndUpload(env, result.body, exit); }); @@ -551,7 +551,7 @@ function stage(fromEnv, toEnv) { imageName: toImage.name, changelog: changelog, upgrade: upgrade, - date: (new Date()).toString(), + date: (new Date()).toISOString(), sourceTarballUrl: 'https://' + toEnv.releasesBucket + '.s3.amazonaws.com/' + sourceTarballName, author: username + ' <' + email + '>', next: null From 8d5a3ecd690e4adbf1c31eaadd5894919dac6e51 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 15:29:42 -0700 Subject: [PATCH 101/234] admin: remove the cloudron chooser This needlessly ties down this tool to digitalocean --- admin/admin | 104 ++-------------------------------------------------- 1 file changed, 4 insertions(+), 100 deletions(-) diff --git a/admin/admin b/admin/admin index b976ed679..c5caef128 100755 --- 
a/admin/admin +++ b/admin/admin @@ -14,7 +14,6 @@ var assert = require('assert'), readlineSync = require('readline-sync'), spawn = require('child_process').spawn, SshClient = require('ssh2').Client, - superagent = require('superagent'), util = require('util'); require('colors'); @@ -22,9 +21,6 @@ require('colors'); var SSH = 'root@%s -tt -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i %s'; var sshKeyPath = path.join(process.env.HOME, '/.ssh/id_rsa_yellowtent'); -if (!process.env['DIGITAL_OCEAN_TOKEN_DEV']) exit('Missing env variable DIGITAL_OCEAN_TOKEN_DEV'); -if (!process.env['DIGITAL_OCEAN_TOKEN_STAGING']) exit('Missing env variable DIGITAL_OCEAN_TOKEN_STAGING'); - if (!fs.existsSync(sshKeyPath)) exit('Unable to find ssh key path. Searching for ' + sshKeyPath); // Allow self signed certs! @@ -35,91 +31,6 @@ function exit(error) { process.exit(error ? 1 : 0); } -function getDroplets(token, callback) { - assert.strictEqual(typeof token, 'string'); - assert.strictEqual(typeof callback, 'function'); - - var droplets = []; - var nextPage = null; - - async.doWhilst(function (callback) { - var url = nextPage ? nextPage : 'https://api.digitalocean.com/v2/droplets'; - - superagent.get(url).set('Authorization', 'Bearer ' + token).end(function (error, result) { - if (error) return callback(error.message); - if (result.statusCode === 403) return callback('Invalid Digitalocean credentials'); - if (result.statusCode !== 200) return callback(util.format('Unable to get droplet list. %s - %s', result.statusCode, result.text)); - - nextPage = (result.body.links && result.body.links.pages) ? 
result.body.links.pages.next : null; - droplets = droplets.concat(result.body.droplets); - - callback(null); - }); - }, function () { return !!nextPage; }, function (error) { - if (error) return callback(error); - callback(null, droplets); - }); -} - -function selectCloudron(action) { - assert.strictEqual(typeof action, 'function'); - - var dropletsDev = []; - var dropletsStaging = []; - var dropletsProd = []; - - console.log('Getting droplet lists from dev and staging...'); - - getDroplets(process.env['DIGITAL_OCEAN_TOKEN_DEV'], function (error, result) { - if (error) exit(error); - - dropletsDev = result; - - getDroplets(process.env['DIGITAL_OCEAN_TOKEN_STAGING'], function (error, result) { - if (error) exit(error); - - dropletsStaging = result; - - getDroplets(process.env['DIGITAL_OCEAN_TOKEN_PROD'], function (error, result) { - if (error) exit(error); - - dropletsProd = result; - - console.log(); - console.log('Available Droplets on dev:'.bold); - dropletsDev.forEach(function (droplet, index) { - console.log('\t(%s)\t%s %s', index, droplet.name.cyan, droplet.networks.v4[0].ip_address); - }); - - console.log(); - console.log('Available Droplets on staging:'.bold); - dropletsStaging.forEach(function (droplet, index) { - console.log('\t(%s)\t%s %s', dropletsDev.length + index, droplet.name.cyan, droplet.networks.v4[0].ip_address); - }); - - console.log(); - console.log('Available Droplets on prod:'.bold); - dropletsProd.forEach(function (droplet, index) { - console.log('\t(%s)\t%s %s', dropletsDev.length + dropletsStaging.length + index, droplet.name.cyan, droplet.networks.v4[0].ip_address); - }); - - console.log(); - - var droplets = dropletsDev.concat(dropletsStaging).concat(dropletsProd); - - var index = -1; - while (true) { - index = parseInt(readlineSync.question('Choose cloudron [0-' + (droplets.length-1) + ']: ', {})); - if (isNaN(index) || index < 0 || index > droplets.length-1) console.log('Invalid selection'.red); - else break; - } - - 
action(droplets[index].networks.v4[0].ip_address); - }); - }); - }); -} - function loginToCloudron(ip) { assert.strictEqual(typeof ip, 'string'); @@ -270,16 +181,11 @@ function hotfix(options) { code = options.code; } - if (!options.ip) { - selectCloudron(function (ip) { hotfixCloudron(ip, code); }); - } else { - hotfixCloudron(options.ip, code); - } + hotfixCloudron(options.ip, code); } function login(options) { - if (!options.ip) selectCloudron(loginToCloudron); - else loginToCloudron(options.ip); + loginToCloudron(options.ip); } function logs(options) { @@ -289,13 +195,11 @@ function logs(options) { if (options.nginxAccess) fileName = '/var/log/nginx/access.log'; if (options.nginxError) fileName = '/var/log/nginx/error.log'; - if (!options.ip) selectCloudron(function (ip) { logsFromCloudron(ip, fileName, !!options.tail); }); - else logsFromCloudron(options.ip, fileName, !!options.tail); + logsFromCloudron(options.ip, fileName, !!options.tail); } function backup(options) { - if (!options.ip) selectCloudron(triggerBackup); - else triggerBackup(options.ip); + triggerBackup(options.ip); } // entry point From a9444ed879e06360255f60e2e45fb64f636edd95 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 15:34:04 -0700 Subject: [PATCH 102/234] rename login to ssh --- admin/admin | 45 ++++++++++++++++++--------------------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/admin/admin b/admin/admin index c5caef128..e95d37bf0 100755 --- a/admin/admin +++ b/admin/admin @@ -4,7 +4,6 @@ var assert = require('assert'), async = require('async'), - crypto = require('crypto'), execSync = require('child_process').execSync, fs = require('fs'), https = require('https'), @@ -31,26 +30,6 @@ function exit(error) { process.exit(error ? 
1 : 0); } -function loginToCloudron(ip) { - assert.strictEqual(typeof ip, 'string'); - - console.log('Ssh into %s'.bold, ip.cyan); - - var ssh = spawn('ssh', util.format(SSH, ip, sshKeyPath).split(' ')); - ssh.on('exit', exit); - ssh.on('error', exit); - - process.stdin.setEncoding('utf8'); - process.stdin.setRawMode(true); - - process.stdin.pipe(ssh.stdin); - ssh.stdout.pipe(process.stdout); - ssh.stderr.pipe(process.stderr); - - process.stdin.resume(); - -} - function logsFromCloudron(ip, fileName, tail) { assert.strictEqual(typeof ip, 'string'); assert.strictEqual(typeof fileName, 'string'); @@ -184,8 +163,21 @@ function hotfix(options) { hotfixCloudron(options.ip, code); } -function login(options) { - loginToCloudron(options.ip); +function ssh(ip) { + console.log('Ssh into %s'.bold, ip.cyan); + + var sshClient = spawn('ssh', util.format(SSH, ip, sshKeyPath).split(' ')); + sshClient.on('exit', exit); + sshClient.on('error', exit); + + process.stdin.setEncoding('utf8'); + process.stdin.setRawMode(true); + + process.stdin.pipe(sshClient.stdin); + sshClient.stdout.pipe(process.stdout); + sshClient.stderr.pipe(process.stderr); + + process.stdin.resume(); } function logs(options) { @@ -205,10 +197,9 @@ function backup(options) { // entry point program.version('0.1.0'); -program.command('login') - .description('Login to cloudron') - .option('--ip ', 'Cloudron IP') - .action(login); +program.command('ssh') + .description('SSH to cloudron') + .action(ssh); program.command('logs') .description('Fetch logs by filename') From f9c2b0acd1e15d1df7513268e59017d1347a508c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 15:34:15 -0700 Subject: [PATCH 103/234] remove cloudronLogin alternative: admin/admin ssh --- admin/cloudronLogin | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100755 admin/cloudronLogin diff --git a/admin/cloudronLogin b/admin/cloudronLogin deleted file mode 100755 index 4e62de8c7..000000000 --- a/admin/cloudronLogin 
+++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -eu -o pipefail - -readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent" - -if [[ "$#" != "1" ]]; then - echo "Missing cloudron IP argument"; - exit 1; -fi - -if [[ ! -f "${ssh_keys}" ]]; then - echo "yellowtent ssh key is missing" - exit 1 -fi - -ssh root@$1 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "${ssh_keys}" From a3383b1f98ec9fb4c69055fef1f4fb002224a3d9 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 15:44:05 -0700 Subject: [PATCH 104/234] remove logs route most of this stuff doesn't work anyways since we moved to systemd --- admin/admin | 52 --------------------------------------------------- src/server.js | 34 --------------------------------- 2 files changed, 86 deletions(-) diff --git a/admin/admin b/admin/admin index e95d37bf0..1f2ba66f8 100755 --- a/admin/admin +++ b/admin/admin @@ -30,38 +30,6 @@ function exit(error) { process.exit(error ? 1 : 0); } -function logsFromCloudron(ip, fileName, tail) { - assert.strictEqual(typeof ip, 'string'); - assert.strictEqual(typeof fileName, 'string'); - assert.strictEqual(typeof tail, 'boolean'); - - console.log('Fetching logs from'.bold, ip.cyan); - - var options = { - hostname: ip, - port: 886, - path: util.format('/api/v1/installer/logs?filename=%s&tail=%s', fileName, tail), - method: 'GET', - key: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.key')), - cert: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.crt')), - ca: fs.readFileSync(path.join(__dirname, '../../keys/installer_ca/ca.crt')), - rejectUnauthorized: false - }; - - var req = https.request(options, function (res) { - res.setEncoding('utf8'); - res.on('data', function (chunk) { - process.stdout.write(chunk); - }); - }); - - req.on('error', function (error) { - exit(error); - }); - - req.end(); -} - function triggerBackup(ip) { assert.strictEqual(typeof ip, 'string'); @@ -180,16 +148,6 @@ function 
ssh(ip) { process.stdin.resume(); } -function logs(options) { - var fileName = '/var/log/supervisor/box.log'; - - if (options.installer) fileName = '/var/log/cloudron/installserver.log'; - if (options.nginxAccess) fileName = '/var/log/nginx/access.log'; - if (options.nginxError) fileName = '/var/log/nginx/error.log'; - - logsFromCloudron(options.ip, fileName, !!options.tail); -} - function backup(options) { triggerBackup(options.ip); } @@ -201,16 +159,6 @@ program.command('ssh') .description('SSH to cloudron') .action(ssh); -program.command('logs') - .description('Fetch logs by filename') - .option('--ip ', 'Cloudron IP') - .option('-f, --tail', 'tail the logs') - .option('--installer', 'installer logs') - .option('--nginx-error', 'nginx error logs') - .option('--nginx-access', 'nginx access logs') - .option('--box', 'box logs [default]') - .action(logs); - program.command('hotfix') .description('Hotfix a cloudron') .option('--ip ', 'Cloudron IP') diff --git a/src/server.js b/src/server.js index 7d618c4ee..8119e98dd 100755 --- a/src/server.js +++ b/src/server.js @@ -77,39 +77,6 @@ function retire(req, res, next) { next(new HttpSuccess(202, {})); } -function logs(req, res, next) { - if (!req.query.filename) return next(new HttpError(400, 'No filename provided')); - var tail = req.query.tail === 'true'; - var stream = null; - - var stat = safe.fs.statSync(req.query.filename); - - if (!stat) return res.status(404).send('Not found'); - - if (tail) { - var tailStreamOptions = { - beginAt: 'end', - onMove: 'follow', - detectTruncate: true, - onTruncate: 'end', - endOnError: true - }; - - stream = safe(function () { return ts.createReadStream(req.query.filename, tailStreamOptions); }); - stream.destroy = stream.end; // tail-stream closes it's watchers with this special API - } else { - stream = fs.createReadStream(req.query.filename); - res.set('content-length', stat.size); - } - - if (!stream) return res.status(404).send(safe.error.message); - - stream.on('error', 
function (error) { res.write(error.message); res.end(); }); - res.on('close', function () { stream.destroy(); }); - res.status(200); - stream.pipe(res); -} - function backup(req, res, next) { // !! below port has to be in sync with box/config.js internalPort superagent.post('http://127.0.0.1:3001/api/v1/backup').end(function (error, result) { @@ -158,7 +125,6 @@ function startProvisionServer(callback) { .use(lastMile()); router.post('/api/v1/installer/retire', retire); - router.get ('/api/v1/installer/logs', logs); router.post('/api/v1/installer/backup', backup); var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs'); From 1d27fffe44c07dade03206b8bca8f2d104666032 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 16:21:03 -0700 Subject: [PATCH 105/234] remove ununsed requires --- src/server.js | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/server.js b/src/server.js index 8119e98dd..2eedf8f6c 100755 --- a/src/server.js +++ b/src/server.js @@ -18,9 +18,7 @@ var assert = require('assert'), lastMile = require('connect-lastmile'), morgan = require('morgan'), path = require('path'), - safe = require('safetydance'), - superagent = require('superagent'), - ts = require('tail-stream'); + superagent = require('superagent'); exports = module.exports = { start: start, From 6c5a5c0882e0b69f130cc61d70258b0434734651 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 16:21:18 -0700 Subject: [PATCH 106/234] remove tail-stream --- npm-shrinkwrap.json | 5 ----- package.json | 3 +-- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json index d2b1c0442..120fbb335 100644 --- a/npm-shrinkwrap.json +++ b/npm-shrinkwrap.json @@ -436,11 +436,6 @@ } } } - }, - "tail-stream": { - "version": "0.2.1", - "from": "http://registry.npmjs.org/tail-stream/-/tail-stream-0.2.1.tgz", - "resolved": 
"http://registry.npmjs.org/tail-stream/-/tail-stream-0.2.1.tgz" } } } diff --git a/package.json b/package.json index 2b64638d4..0b26ec60e 100644 --- a/package.json +++ b/package.json @@ -22,8 +22,7 @@ "morgan": "^1.5.1", "proxy-middleware": "^0.11.0", "safetydance": "0.0.16", - "superagent": "^0.21.0", - "tail-stream": "^0.2.1" + "superagent": "^0.21.0" }, "devDependencies": { "aws-sdk": "^2.1.10", From 88b682a31753a6f664f4366346f9b1a01e1149f0 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 16:40:06 -0700 Subject: [PATCH 107/234] take ip as first argument instead of --ip --- admin/admin | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/admin/admin b/admin/admin index 1f2ba66f8..490c51b6e 100755 --- a/admin/admin +++ b/admin/admin @@ -116,7 +116,7 @@ function hotfixCloudron(ip, code) { sshExec(ip, CMDS); } -function hotfix(options) { +function hotfix(ip, options) { var code; if (!options.code) { @@ -128,7 +128,7 @@ function hotfix(options) { code = options.code; } - hotfixCloudron(options.ip, code); + hotfixCloudron(ip, code); } function ssh(ip) { @@ -148,8 +148,8 @@ function ssh(ip) { process.stdin.resume(); } -function backup(options) { - triggerBackup(options.ip); +function backup(ip) { + triggerBackup(ip); } // entry point @@ -161,13 +161,11 @@ program.command('ssh') program.command('hotfix') .description('Hotfix a cloudron') - .option('--ip ', 'Cloudron IP') .option('--code ', 'Code tarball') .action(hotfix); program.command('backup') .description('Backup a cloudron') - .option('--ip ', 'Cloudron IP') .action(backup); program.parse(process.argv); From 30320e0ac6717855215ad1a82f02d7bddafcc03f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 16:44:40 -0700 Subject: [PATCH 108/234] Wait for backup to complete Fixes #351 --- admin/admin | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/admin/admin b/admin/admin index 490c51b6e..69116fa58 100755 
--- a/admin/admin +++ b/admin/admin @@ -13,6 +13,7 @@ var assert = require('assert'), readlineSync = require('readline-sync'), spawn = require('child_process').spawn, SshClient = require('ssh2').Client, + superagent = require('superagent'), util = require('util'); require('colors'); @@ -30,8 +31,9 @@ function exit(error) { process.exit(error ? 1 : 0); } -function triggerBackup(ip) { +function triggerBackup(ip, callback) { assert.strictEqual(typeof ip, 'string'); + assert.strictEqual(typeof callback, 'function'); console.log('Trigger backup on %s'.bold, ip.cyan); @@ -53,8 +55,12 @@ function triggerBackup(ip) { }); }); + req.on('end', function () { + callback(); + }); + req.on('error', function (error) { - exit(error); + callback(error); }); req.end(); @@ -148,8 +154,20 @@ function ssh(ip) { process.stdin.resume(); } +function waitForBackup(ip) { + superagent.get('https://' + ip + '/api/v1/cloudron/progress').get(function (error, result) { + if (error) return exit(error); + + console.dir(result); + }); +} + function backup(ip) { - triggerBackup(ip); + triggerBackup(ip, function (error) { + if (error) return exit(error); + + setTimeout(waitForBackup.bind(null, ip), 3000); + }); } // entry point From 28b000c820c39c31f5e707071e5da165c78e8725 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 17 Sep 2015 21:25:07 -0700 Subject: [PATCH 109/234] admin tool is now merged into caas tool --- admin/admin | 199 ---------------------------------------------------- 1 file changed, 199 deletions(-) delete mode 100755 admin/admin diff --git a/admin/admin b/admin/admin deleted file mode 100755 index 69116fa58..000000000 --- a/admin/admin +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/env node - -'use strict'; - -var assert = require('assert'), - async = require('async'), - execSync = require('child_process').execSync, - fs = require('fs'), - https = require('https'), - os = require('os'), - path = require('path'), - program = require('commander'), - readlineSync = 
require('readline-sync'), - spawn = require('child_process').spawn, - SshClient = require('ssh2').Client, - superagent = require('superagent'), - util = require('util'); - -require('colors'); - -var SSH = 'root@%s -tt -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i %s'; -var sshKeyPath = path.join(process.env.HOME, '/.ssh/id_rsa_yellowtent'); - -if (!fs.existsSync(sshKeyPath)) exit('Unable to find ssh key path. Searching for ' + sshKeyPath); - -// Allow self signed certs! -process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; - -function exit(error) { - if (error) console.log(error); - process.exit(error ? 1 : 0); -} - -function triggerBackup(ip, callback) { - assert.strictEqual(typeof ip, 'string'); - assert.strictEqual(typeof callback, 'function'); - - console.log('Trigger backup on %s'.bold, ip.cyan); - - var options = { - hostname: ip, - port: 886, - path: '/api/v1/installer/backup', - method: 'POST', - key: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.key')), - cert: fs.readFileSync(path.join(__dirname, '../../keys/installer/server.crt')), - ca: fs.readFileSync(path.join(__dirname, '../../keys/installer_ca/ca.crt')), - rejectUnauthorized: false - }; - - var req = https.request(options, function (res) { - res.setEncoding('utf8'); - res.on('data', function (chunk) { - process.stdout.write(chunk); - }); - }); - - req.on('end', function () { - callback(); - }); - - req.on('error', function (error) { - callback(error); - }); - - req.end(); -} - -function sshExec(ip, cmds) { - var privateKey = path.join(process.env.HOME, '.ssh/id_rsa_yellowtent'); - if (!fs.existsSync(privateKey)) exit('cannot find private key'); - - var sshClient = new SshClient(); - sshClient.connect({ - host: ip, - port: 22, - username: 'root', - privateKey: fs.readFileSync(privateKey) - }); - sshClient.on('ready', function () { - console.log('connected'); - - async.eachSeries(cmds, function (cmd, iteratorDone) { - console.log(cmd.cmd.yellow); - 
- sshClient.exec(cmd.cmd, function(err, stream) { - if (err) exit(err.message); - - if (cmd.stdin) cmd.stdin.pipe(stream); - stream.pipe(process.stdout); - stream.on('close', function () { - iteratorDone(); - }); - }); - }, function seriesDone(error) { - if (error) exit(error.message); - - console.log('Done patching'.green); - sshClient.end(); - }); - }); - sshClient.on('error', function (error) { - exit(error.message); - }); - sshClient.on('exit', function (exitCode) { - console.log('exit'); - process.exit(exitCode); - }); -} - -function hotfixCloudron(ip, code) { - var CMDS = [ - { cmd: 'systemctl stop cloudron.target' }, - { cmd: 'rm -rf /home/yellowtent/box/* /home/yellowtent/box/.*' }, - { cmd: 'tar zxf - -C /home/yellowtent/box', stdin: fs.createReadStream(code) }, - { cmd: 'cd /home/yellowtent/box && npm rebuild' }, - { cmd: 'chown -R yellowtent.yellowtent /home/yellowtent/box' }, - { cmd: 'sed -e "s/restoreUrl/_restoreUrl/" -i /home/yellowtent/setup_start.sh' }, // do not restore - { cmd: '/home/yellowtent/setup_start.sh' } // ensure db-migrate runs as well - ]; - - sshExec(ip, CMDS); -} - -function hotfix(ip, options) { - var code; - - if (!options.code) { - var answer = readlineSync.question('Create a tarball from repo (y/n)? 
'); - if (answer !== 'y') return exit(); - code = os.tmpdir() + '/boxtarball.tar.gz'; - execSync(path.join(__dirname, '../images/createBoxTarball --output ' + code + ' --no-upload'), { stdio: [ null, process.stdout, process.stderr ] }); - } else { - code = options.code; - } - - hotfixCloudron(ip, code); -} - -function ssh(ip) { - console.log('Ssh into %s'.bold, ip.cyan); - - var sshClient = spawn('ssh', util.format(SSH, ip, sshKeyPath).split(' ')); - sshClient.on('exit', exit); - sshClient.on('error', exit); - - process.stdin.setEncoding('utf8'); - process.stdin.setRawMode(true); - - process.stdin.pipe(sshClient.stdin); - sshClient.stdout.pipe(process.stdout); - sshClient.stderr.pipe(process.stderr); - - process.stdin.resume(); -} - -function waitForBackup(ip) { - superagent.get('https://' + ip + '/api/v1/cloudron/progress').get(function (error, result) { - if (error) return exit(error); - - console.dir(result); - }); -} - -function backup(ip) { - triggerBackup(ip, function (error) { - if (error) return exit(error); - - setTimeout(waitForBackup.bind(null, ip), 3000); - }); -} - -// entry point -program.version('0.1.0'); - -program.command('ssh') - .description('SSH to cloudron') - .action(ssh); - -program.command('hotfix') - .description('Hotfix a cloudron') - .option('--code ', 'Code tarball') - .action(hotfix); - -program.command('backup') - .description('Backup a cloudron') - .action(backup); - -program.parse(process.argv); - -if (!process.argv.slice(2).length) { - program.outputHelp(); -} else { // https://github.com/tj/commander.js/issues/338 - var knownCommand = program.commands.some(function (command) { return command._name === process.argv[2]; }); - if (!knownCommand) { - console.error('Unknown command: ' + process.argv[2]); - process.exit(1); - } -} From 4c9ec582dcc04adba1889ed192b8f944723c1e4e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 18 Sep 2015 14:47:57 -0700 Subject: [PATCH 110/234] 0.0.56 changes --- release/CHANGES | 3 +++ 1 file 
changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index c24584c33..0c5c24a1c 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -187,3 +187,6 @@ [0.0.55] - Setup cloudron timezone based on droplet region +[0.0.56] +- Use correct timezone in updater + From 3dec6ac9f1efb386daaf104652c7535aff2a8a84 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 21 Sep 2015 11:12:09 -0700 Subject: [PATCH 111/234] 0.0.57 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 0c5c24a1c..64b29e24c 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -190,3 +190,6 @@ [0.0.56] - Use correct timezone in updater +[0.0.57] +- Fix systemd logging issues + From 5f0bb0c6ce72d6f1d42a04be1cfded2cf09894b7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 21 Sep 2015 16:25:57 -0700 Subject: [PATCH 112/234] 0.0.58 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 64b29e24c..61b0fab62 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -193,3 +193,6 @@ [0.0.57] - Fix systemd logging issues +[0.0.58] +- Ensure backups of failed apps are retained across archival cycles + From 53fa339363f8038af989348fd3534e9c35b17956 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 21 Sep 2015 21:56:16 -0700 Subject: [PATCH 113/234] 0.0.59 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 61b0fab62..55bd7c0c1 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -196,3 +196,6 @@ [0.0.58] - Ensure backups of failed apps are retained across archival cycles +[0.0.59] +- Installer API fixes + From 5cb1a2d12096f416507e41d028ba6a66ecb87e8b Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 22 Sep 2015 13:04:45 -0700 Subject: [PATCH 114/234] 0.0.60 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES 
b/release/CHANGES index 55bd7c0c1..78d4aae85 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -199,3 +199,6 @@ [0.0.59] - Installer API fixes +[0.0.60] +- Do full box backup on updates + From 5885d76b89a679a4662f29512a51edc238795044 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 22 Sep 2015 16:15:43 -0700 Subject: [PATCH 115/234] Version 0.0.61 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 78d4aae85..d56f5f86a 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -202,3 +202,6 @@ [0.0.60] - Do full box backup on updates +[0.0.61] +- Track update notifications to inform admin only once + From 2cfb91d0ce238e242c11c0aa2fe4c38fe2cc923e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 22 Sep 2015 22:55:55 -0700 Subject: [PATCH 116/234] allow version to be specified in various commands --- release/release | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/release/release b/release/release index 524f41161..3649ff893 100755 --- a/release/release +++ b/release/release @@ -231,7 +231,7 @@ function createRelease(options) { return verifyAndUpload(env, releases, exit); } - var newVersion = options.amend ? lastVersion : semver.inc(lastVersion, 'patch'); + var newVersion = options.amend ? lastVersion : (options.version || semver.inc(lastVersion, 'patch')); releases[lastReachableVersion].next = newVersion; var newImageId = options.image ? 
parseInt(options.image, 10) : releases[lastReachableVersion].imageId; @@ -488,7 +488,7 @@ function getImageByRevision(env, revision, callback) { }); } -function stage(fromEnv, toEnv) { +function stage(fromEnv, toEnv, stageVersion) { var username = execSync('git config user.name').toString('utf8').trim(); var email = execSync('git config user.email').toString('utf8').trim(); @@ -508,7 +508,7 @@ function stage(fromEnv, toEnv) { var latestFromVersion = Object.keys(fromReleases).sort(semver.rcompare)[0]; var latestToVersion = Object.keys(toReleases).sort(semver.rcompare)[0]; - var nextVersion = semver.inc(latestToVersion, 'patch'); + var nextVersion = stageVersion ||latestFromVersion; // dev and staging are assumed to be 'synced' console.log('Releasing version %s to %s'.gray, nextVersion , toEnv.tag); @@ -569,8 +569,6 @@ function stage(fromEnv, toEnv) { }); } -program.version('0.0.1'); - program.command('amend') .option('--env ', 'Environment (dev/staging/prod)', 'dev') .option('--code ', 'Source code url') @@ -586,6 +584,7 @@ program.command('create') .option('--image ', 'Image id') .option('--changelog ', 'Changelog') .option('--upgrade', 'Set the upgrade flag') + .option('--version ', 'Create the specified version') .description('Create a new release') .action(createRelease); @@ -608,7 +607,8 @@ program.command('new') program.command('publish') .description('Publish latest staging version to production') - .action(stage.bind(null, ENVIRONMENTS['staging'], ENVIRONMENTS['prod'])); + .option('--version ', 'Version to publish') + .action(function (options) { stage(ENVIRONMENTS['staging'], ENVIRONMENTS['prod'], options.version); }); program.command('rerelease') .option('--env ', 'Environment (dev/staging/prod)', 'dev') @@ -622,7 +622,8 @@ program.command('revert') program.command('stage') .description('Stage latest dev version to staging') - .action(stage.bind(null, ENVIRONMENTS['dev'], ENVIRONMENTS['staging'])); + .option('--version ', 'Version to publish') + 
.action(function (options) { stage(ENVIRONMENTS['dev'], ENVIRONMENTS['staging'], options.version); }); program.command('sync') .option('--env ', 'Environment (dev/staging)', 'dev') From a022bdb30d0d6c6cd43ebc79f5b886d23517fe66 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 22 Sep 2015 22:58:27 -0700 Subject: [PATCH 117/234] set default version to null to override commander built-in --- release/release | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/release/release b/release/release index 3649ff893..bf7909c5b 100755 --- a/release/release +++ b/release/release @@ -584,7 +584,7 @@ program.command('create') .option('--image ', 'Image id') .option('--changelog ', 'Changelog') .option('--upgrade', 'Set the upgrade flag') - .option('--version ', 'Create the specified version') + .option('--version ', 'Create the specified version', null) .description('Create a new release') .action(createRelease); @@ -607,7 +607,7 @@ program.command('new') program.command('publish') .description('Publish latest staging version to production') - .option('--version ', 'Version to publish') + .option('--version ', 'Version to publish', null) .action(function (options) { stage(ENVIRONMENTS['staging'], ENVIRONMENTS['prod'], options.version); }); program.command('rerelease') @@ -622,7 +622,7 @@ program.command('revert') program.command('stage') .description('Stage latest dev version to staging') - .option('--version ', 'Version to publish') + .option('--version ', 'Version to publish', null) .action(function (options) { stage(ENVIRONMENTS['dev'], ENVIRONMENTS['staging'], options.version); }); program.command('sync') From 5c51619798cca8c534c0b7509375850858cf2060 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sat, 26 Sep 2015 00:04:52 -0700 Subject: [PATCH 118/234] Version 0.0.62 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index d56f5f86a..19a379296 100644 --- a/release/CHANGES 
+++ b/release/CHANGES @@ -205,3 +205,6 @@ [0.0.61] - Track update notifications to inform admin only once +[0.0.62] +- Export bind dn and password from LDAP addon + From f36946a8aaac6eb392aa92e35efae4283cbcdb4f Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 28 Sep 2015 14:19:31 +0200 Subject: [PATCH 119/234] Forward the backup trigger status code and error message --- src/server.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.js b/src/server.js index 2eedf8f6c..7a859d6f8 100755 --- a/src/server.js +++ b/src/server.js @@ -79,7 +79,7 @@ function backup(req, res, next) { // !! below port has to be in sync with box/config.js internalPort superagent.post('http://127.0.0.1:3001/api/v1/backup').end(function (error, result) { if (error) return next(new HttpError(500, error)); - if (result.statusCode !== 202) return next(new HttpError(500, 'trigger backup failed with ' + result.statusCode)); + if (result.statusCode !== 202) return next(new HttpError(result.statusCode, 'trigger backup failed with ' + result.body.message)); next(new HttpSuccess(202, {})); }); } From 0d78150f10bfa15d79b243a8f23f9b0219925343 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 28 Sep 2015 14:33:06 +0200 Subject: [PATCH 120/234] Log forwarding is no more --- src/test/installer-test.js | 56 -------------------------------------- 1 file changed, 56 deletions(-) diff --git a/src/test/installer-test.js b/src/test/installer-test.js index 262d5ead7..c36cc8aa4 100644 --- a/src/test/installer-test.js +++ b/src/test/installer-test.js @@ -108,62 +108,6 @@ describe('Server', function () { }); }); - describe('logs', function () { - before(function (done) { - process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // TODO: use a installer ca signed cert instead - server.start(done); - }); - - after(function (done) { - server.stop(done); - delete process.env.NODE_TLS_REJECT_UNAUTHORIZED; - }); - - it('needs filename', function (done) { - 
request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs').end(function (error, result) { - expect(!error).to.be.ok(); - expect(result.statusCode).to.equal(400); - done(); - }); - }); - - it('returns stream for valid file', function (done) { - request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?filename=' + __filename).end(function (error, result) { - expect(!error).to.be.ok(); - expect(result.headers['content-length']).to.be('' + fs.statSync(__filename).size); - expect(result.statusCode).to.equal(200); - done(); - }); - }); - - it('returns tail stream for valid file', function (done) { - var tailFile = path.join(os.tmpdir(), 'test-tail'); - fs.writeFileSync(tailFile, 'line 1\n'); - - var res = request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?tail=true&filename=' + tailFile).end(function (error, result) { - expect(!error).to.be.ok(); - expect(result.headers['transfer-encoding']).to.be('chunked'); - expect(result.statusCode).to.equal(200); - - fs.unlinkSync(tailFile); - - done(); - }); - - // push some new log lines to trigger request.get() callback - setTimeout(function () { fs.appendFileSync(tailFile, 'line 2\n'); }, 100); - setTimeout(res.abort.bind(res), 200); - }); - - it('returns 404 for missing file', function (done) { - request.get(EXTERNAL_SERVER_URL + '/api/v1/installer/logs?filename=/tmp/randomtotally').end(function (error, result) { - expect(!error).to.be.ok(); - expect(result.statusCode).to.equal(404); - done(); - }); - }); - }); - describe('retire', function () { var data = { data: { From c9bf0176372db037531121e8722311e3d9c2e3de Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 28 Sep 2015 17:00:18 -0700 Subject: [PATCH 121/234] 0.0.63 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 19a379296..6cefbeab0 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -208,3 +208,6 @@ [0.0.62] - Export bind dn and password from LDAP addon +[0.0.63] +- Fix creation 
of TXT records + From 0c285f21c1e6d133d75460bdbc7fa30644482f03 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 28 Sep 2015 23:47:13 -0700 Subject: [PATCH 122/234] rework images script --- release/images | 185 +++++++++++++++++++++++-------------------------- 1 file changed, 85 insertions(+), 100 deletions(-) diff --git a/release/images b/release/images index b03c9c344..75469bfff 100755 --- a/release/images +++ b/release/images @@ -7,28 +7,27 @@ require('supererror')({ splatchError: true }); var superagent = require('superagent'), async = require('async'), yesno = require('yesno'), - p = require('commander'); + program = require('commander'); var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; -p.version('0.0.1') - .option('-l, --list', 'List images (default if neither --list or --cleanup provided') - .option('--cleanup', 'Delete images, which are not part of an release') - .option('-a, --all', 'Images from all environments (default if no argument provided)') - .option('-d, --development', 'Images from development') - .option('-s, --staging', 'Images from staging') - .option('-p, --production', 'Images from production') - .parse(process.argv); - -if (p.list || !p.cleanup) { - p.list = true; -} - -if (p.all || !(p.development || p.staging || p.production)) { - p.development = true; - p.staging = true; - p.production = true; -} +var ENVIRONMENTS = { + 'dev': { + tag: 'dev', + releaseUrl: 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_DEV + }, + 'staging': { + tag: 'staging', + releaseUrl: 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_STAGING + }, + 'prod': { + tag: 'prod', + releaseUrl: 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json', + digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_PROD + } +}; function deleteImage(image, token, callback) { var url = DIGITALOCEAN + '/images/' + image.id; 
@@ -42,7 +41,7 @@ function deleteImage(image, token, callback) { }); } -function listImages(token, callback) { +function getImages(token, callback) { var images = []; var nextPage = null; @@ -63,26 +62,26 @@ function listImages(token, callback) { }); } -function printEnvironment(tag, items, releases, callback) { +function printImages(env, images, releases, callback) { console.log(''); - console.log('%s:', tag); + console.log('%s:', env.tag); console.log(''); var imageRegExp = new RegExp('box-(?:dev|staging|prod)-[0-9,a-f]{7}-[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}'); - items.forEach(function (item) { - if (!imageRegExp.test(item.name)) return; + images.forEach(function (image) { + if (!imageRegExp.test(image.name)) return; var releaseNumber = []; for (var release in releases) { if (releases.hasOwnProperty(release)) { - if (releases[release].imageId === item.id) { + if (releases[release].imageId === image.id) { releaseNumber.push(release); } } } - console.log(' %s : %s %s\t[%s]', item.id, item.name, releaseNumber.length ? releaseNumber.join(', ') : ' ', item.regions); + console.log(' %s : %s %s\t[%s]', image.id, image.name, releaseNumber.length ? 
releaseNumber.join(', ') : ' ', image.regions); }); console.log(''); @@ -90,27 +89,27 @@ function printEnvironment(tag, items, releases, callback) { callback(null); } -function cleanupEnvironment(env, tag, items, releases, callback) { +function deleteUnusedImages(env, images, releases, callback) { console.log(''); - console.log('Cleanup images on %s:', tag); + console.log('Cleanup images on %s:', env.tag); var imagesToCleanup = []; var imageRegExp = new RegExp('box-(?:dev|staging|prod)-[0-9,a-f]{7}-[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}'); - items.forEach(function (item) { - if (!imageRegExp.test(item.name)) return; + images.forEach(function (image) { + if (!imageRegExp.test(image.name)) return; for (var release in releases) { if (releases.hasOwnProperty(release)) { - if (releases[release].imageId === item.id) { + if (releases[release].imageId === image.id) { return; } } } // we reached here so no release found - imagesToCleanup.push(item); + imagesToCleanup.push(image); }); if (imagesToCleanup.length === 0) { @@ -118,8 +117,8 @@ function cleanupEnvironment(env, tag, items, releases, callback) { return callback(null); } - imagesToCleanup.forEach(function (item) { - console.log(' %s : %s [%s]', item.id, item.name, item.regions); + imagesToCleanup.forEach(function (image) { + console.log(' %s : %s [%s]', image.id, image.name, image.regions); }); console.log(''); @@ -127,7 +126,7 @@ function cleanupEnvironment(env, tag, items, releases, callback) { yesno.ask('Do you want to delete those images? 
[y/N]', false, function (ok) { if (ok) { async.each(imagesToCleanup, function (image, callback) { - deleteImage(image, process.env[env], callback); + deleteImage(image, env.digitalOceanToken, callback); }, callback); return; } @@ -136,81 +135,67 @@ function cleanupEnvironment(env, tag, items, releases, callback) { }); } - -function handleListEnvironment(active, env, tag, releaseUrl) { - return function (callback) { - if (!active) return callback(null); - if (!process.env[env]) { - console.log('%s not set. Skipping %s.', env, tag); - return callback(null); - } - - listImages(process.env[env], function (error, result) { - if (error) return callback(error); - - var images = result; - superagent.get(releaseUrl).end(function (error, result) { - if (error || result.error) return callback(error || result.error); - - // we get it as text - var releases = JSON.parse(result.text); - - printEnvironment(tag, images, releases, callback); - }); - }); - }; +function exit(error) { + if (error) console.error(error); + process.exit(error ? 1 : 0); } -function handleCleanupEnvironment(active, env, tag, releaseUrl) { - return function (callback) { - if (!active) return callback(null); - if (!process.env[env]) { - console.log('%s not set. 
Skipping %s.', env, tag); - return callback(null); - } +function listImages(options) { + var env = ENVIRONMENTS[options.env]; - listImages(process.env[env], function (error, result) { - if (error) return callback(error); + getImages(env.digitalOceanToken, function (error, result) { + if (error) return exit(error); - var images = result; - superagent.get(releaseUrl).end(function (error, result) { - if (error || result.error) return callback(error || result.error); + var images = result; + superagent.get(env.releaseUrl).end(function (error, result) { + if (error || result.error) return exit(error || result.error); - // we get it as text - var releases = JSON.parse(result.text); + // we get it as text + var releases = JSON.parse(result.text); - cleanupEnvironment(env, tag, images, releases, callback); - }); + printImages(env, images, releases, exit); }); - }; -} - -if (p.list) { - async.series([ - handleListEnvironment(p.development, 'DIGITAL_OCEAN_TOKEN_DEV', 'Development', 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json'), - handleListEnvironment(p.staging, 'DIGITAL_OCEAN_TOKEN_STAGING', 'Staging', 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json'), - handleListEnvironment(p.production, 'DIGITAL_OCEAN_TOKEN_PROD', 'Production', 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json') - ], function (error) { - if (error) { - console.log(error); - process.exit(1); - } - - process.exit(0); }); } -if (p.cleanup) { - async.series([ - handleCleanupEnvironment(p.development, 'DIGITAL_OCEAN_TOKEN_DEV', 'Development', 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json'), - handleCleanupEnvironment(p.staging, 'DIGITAL_OCEAN_TOKEN_STAGING', 'Staging', 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json'), - handleCleanupEnvironment(p.production, 'DIGITAL_OCEAN_TOKEN_PROD', 'Production', 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json') - ], function (error) { - if (error) { - console.log(error); - 
process.exit(1); - } +function cleanupImages(options) { + var env = ENVIRONMENTS[options.env]; - process.exit(0); + getImages(env.digitalOceanToken, function (error, result) { + if (error) return exit(error); + + var images = result; + superagent.get(env.releaseUrl).end(function (error, result) { + if (error || result.error) return exit(error || result.error); + + // we get it as text + var releases = JSON.parse(result.text); + + deleteUnusedImages(env, images, releases, exit); + }); }); } + +program.version('0.0.1'); + +program.command('list') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('List images of environment') + .action(listImages); + +program.command('cleanup') + .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .description('Cleanup images of environment') + .action(cleanupImages); + +program.parse(process.argv); + +if (!process.argv.slice(2).length) { + program.outputHelp(); +} else { // https://github.com/tj/commander.js/issues/338 + var knownCommand = program.commands.some(function (command) { return command._name === process.argv[2]; }); + if (!knownCommand) { + console.error('Unknown command: ' + process.argv[2]); + process.exit(1); + } +} From 11d4df4f7dacb3ec93463febfd9f0504936cb22a Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 28 Sep 2015 23:55:31 -0700 Subject: [PATCH 123/234] fix loop (by actually using nextPage link) --- release/images | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/release/images b/release/images index 75469bfff..dcecad1c9 100755 --- a/release/images +++ b/release/images @@ -43,12 +43,10 @@ function deleteImage(image, token, callback) { function getImages(token, callback) { var images = []; - var nextPage = null; + var nextPage = DIGITALOCEAN + '/images?private=true'; async.doWhilst(function (callback) { - var url = DIGITALOCEAN + '/images?private=true'; - - superagent.get(url).set('Authorization', 'Bearer ' + token).end(function (error, result) 
{ + superagent.get(nextPage).set('Authorization', 'Bearer ' + token).end(function (error, result) { if (error || result.error) return callback(error || result.error); nextPage = (result.body.links && result.body.links.pages && nextPage !== result.body.links.pages.next) ? result.body.links.pages.next : null; @@ -58,6 +56,7 @@ function getImages(token, callback) { }); }, function () { return !!nextPage; }, function (error) { if (error) return callback(error); + callback(null, images); }); } From b8e115ddf64716f6fb81b59cc5bff1e11dcff0c7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 28 Sep 2015 23:58:00 -0700 Subject: [PATCH 124/234] move images script --- {release => images}/images | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {release => images}/images (100%) diff --git a/release/images b/images/images similarity index 100% rename from release/images rename to images/images From 748eadd22524156900eeb23f9edb5ad5e07b7372 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 29 Sep 2015 11:58:12 -0700 Subject: [PATCH 125/234] stop apps and installer when retiring cloudron we cannot put this in stop.sh because that is called during update. 
--- src/scripts/retire.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/scripts/retire.sh b/src/scripts/retire.sh index 1d8363eca..d93fc7c8e 100755 --- a/src/scripts/retire.sh +++ b/src/scripts/retire.sh @@ -24,3 +24,7 @@ done echo "Setting up splash screen" "${BOX_SRC_DIR}/setup/splashpage.sh" --retire --data "${arg_data}" # show splash "${BOX_SRC_DIR}/setup/stop.sh" # stop the cloudron code + +systemctl stop docker # stop the apps +systemctl stop cloudron-installer # stop the installer + From 109f9567eaf21c3506a4d468ed752d287a162dbd Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 29 Sep 2015 13:21:16 -0700 Subject: [PATCH 126/234] 0.0.64 changes --- release/CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 6cefbeab0..b7394a2e0 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -211,3 +211,7 @@ [0.0.63] - Fix creation of TXT records +[0.0.64] +- Stop apps in a retired cloudron +- Retry downloading application on failure + From 8ef15df7c0686659c876103eef20856a3271bc37 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 7 Oct 2015 18:49:53 -0700 Subject: [PATCH 127/234] 0.0.65 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index b7394a2e0..33bcdad25 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -215,3 +215,6 @@ - Stop apps in a retired cloudron - Retry downloading application on failure +[0.0.65] +- Do not send crash mails for apps in development + From 390285d9e55bf9b8f375ee9b5a5fcc5e28f0c090 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 8 Oct 2015 13:16:23 -0700 Subject: [PATCH 128/234] version 0.0.66 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 33bcdad25..dba3ef0b9 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -218,3 +218,6 @@ [0.0.65] - Do not send crash mails for apps in development +[0.0.66] +- 
Readonly application and addon containers + From 75d69050d5214464b0c8bfadd3b76e99dc729aac Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 9 Oct 2015 10:06:11 -0700 Subject: [PATCH 129/234] 0.0.67 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index dba3ef0b9..5c33822c2 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -221,3 +221,6 @@ [0.0.66] - Readonly application and addon containers +[0.0.67] +- Fix email notifications + From c453df55d6a246e69e8fd1da77931943eea82819 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 9 Oct 2015 12:05:03 -0700 Subject: [PATCH 130/234] More 0.0.67 changes --- release/CHANGES | 1 + 1 file changed, 1 insertion(+) diff --git a/release/CHANGES b/release/CHANGES index 5c33822c2..9d8bbaef4 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -223,4 +223,5 @@ [0.0.67] - Fix email notifications +- Fix bug when restoring from certain backups From 8a77242072388fdfce2aeb570b6160260ff22f4e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 9 Oct 2015 13:10:52 -0700 Subject: [PATCH 131/234] clear version field in rerelease --- release/release | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/release/release b/release/release index bf7909c5b..b45f19607 100755 --- a/release/release +++ b/release/release @@ -232,6 +232,10 @@ function createRelease(options) { } var newVersion = options.amend ? lastVersion : (options.version || semver.inc(lastVersion, 'patch')); + // guard against options.version being commander's version function. any command using this code path needs to explicitly clear the version + // this is the price to pay for using --version with commander + assert(semver.valid(newVersion)); + releases[lastReachableVersion].next = newVersion; var newImageId = options.image ? 
parseInt(options.image, 10) : releases[lastReachableVersion].imageId; @@ -612,6 +616,7 @@ program.command('publish') program.command('rerelease') .option('--env ', 'Environment (dev/staging/prod)', 'dev') + .option('--version ', 'Create the specified version', null) .description('Make a new release, same as the last release') .action(function (options) { options.rerelease = true; createRelease(options); }); From b3fa76f8c5b6a0217fa8481eb39a8c52b7cf4ad6 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sat, 10 Oct 2015 14:39:53 -0700 Subject: [PATCH 132/234] 0.0.68 changes --- release/CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 9d8bbaef4..131e3be07 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -225,3 +225,6 @@ - Fix email notifications - Fix bug when restoring from certain backups +[0.0.68] +- Update graphite image + From bf18307168650e3a9e448404be605d1eb4b5c6a4 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 11 Oct 2015 18:13:36 +0200 Subject: [PATCH 133/234] 0.0.69 changes --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 131e3be07..35183f9a5 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -228,3 +228,5 @@ [0.0.68] - Update graphite image +[0.0.69] +- Add simpleauth addon support From 5c9ff468cc9a3fdf2981ddb2c63674e368469d26 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 11 Oct 2015 18:48:24 +0200 Subject: [PATCH 134/234] Add python to the base image --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 7e1a2989d..9b543f25b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -159,6 +159,7 @@ echo "==== Install nodejs ====" apt-get install -y curl curl -sL https://deb.nodesource.com/setup_0.12 | bash - apt-get install -y nodejs +apt-get install -y 
python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" cd "${INSTALLER_SOURCE_DIR}" && npm install --production From 99489e5e7725ca7af67fcc6401364eef4ab7188e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sun, 11 Oct 2015 10:02:44 -0700 Subject: [PATCH 135/234] install specific version of python --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9b543f25b..e873d5fe6 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -159,7 +159,7 @@ echo "==== Install nodejs ====" apt-get install -y curl curl -sL https://deb.nodesource.com/setup_0.12 | bash - apt-get install -y nodejs -apt-get install -y python # Install python which is required for npm rebuild +apt-get install -y python2.7 # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" cd "${INSTALLER_SOURCE_DIR}" && npm install --production From 12eae2c002ba335631a09082ac372b69ae4b0e9f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sun, 11 Oct 2015 16:03:08 -0700 Subject: [PATCH 136/234] remove 0.0.69 --- release/CHANGES | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/release/CHANGES b/release/CHANGES index 35183f9a5..8ebed735c 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -227,6 +227,5 @@ [0.0.68] - Update graphite image - -[0.0.69] - Add simpleauth addon support + From 8303991217c6e7b833b7028d610b87c486d70626 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 12 Oct 2015 15:02:05 +0200 Subject: [PATCH 137/234] Revert "install specific version of python" The specific version does not create /usr/bin/python but only the exact version /usr/bin/python2.7 The meta package python is the only one creating that link and according to https://wiki.ubuntu.com/Python/3 /usr/bin/python will not point to version 3 anytime soon 
at all. This reverts commit be128bbecfd2afe9ef2bdca603a3b26e8ccced7b. --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e873d5fe6..9b543f25b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -159,7 +159,7 @@ echo "==== Install nodejs ====" apt-get install -y curl curl -sL https://deb.nodesource.com/setup_0.12 | bash - apt-get install -y nodejs -apt-get install -y python2.7 # Install python which is required for npm rebuild +apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" cd "${INSTALLER_SOURCE_DIR}" && npm install --production From cd9602e641c33e33eaad7b4d1fec0379cb1cc2c2 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 12 Oct 2015 16:47:25 +0200 Subject: [PATCH 138/234] changes 0.0.69 --- release/CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release/CHANGES b/release/CHANGES index 8ebed735c..f1ae0193e 100644 --- a/release/CHANGES +++ b/release/CHANGES @@ -229,3 +229,5 @@ - Update graphite image - Add simpleauth addon support +[0.0.69] +- Support newer manifest format From 51e2e5ec9cb9dfa5d9182f8f612faec6beb570cb Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 12 Oct 2015 19:37:31 +0200 Subject: [PATCH 139/234] Use a temporary file for the release tarball copy The pipe failed for me a couple of times in a row --- release/release | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/release/release b/release/release index b45f19607..13dde4abc 100755 --- a/release/release +++ b/release/release @@ -535,16 +535,20 @@ function stage(fromEnv, toEnv, stageVersion) { var sourceTarballName = url.parse(fromReleases[latestReachableFromVersion].sourceTarballUrl).pathname.substr(1); var upgrade = fromReleases[latestReachableFromVersion].upgrade; + var tmpFile = '/tmp/' + 
sourceTarballName; console.log('Copying source code tarball %s to %s'.gray, sourceTarballName, toEnv.tag); - var cmd = util.format( - 's3cmd get -v --ssl --access_key="%s" --secret_key="%s" "s3://%s/%s" - ' + - ' | s3cmd put -v --ssl --add-header=x-amz-acl:authenticated-read --access_key="%s" --secret_key="%s" - "s3://%s/%s"', - fromEnv.accessKeyId, fromEnv.secretAccessKey, fromEnv.releasesBucket, sourceTarballName, - toEnv.accessKeyId, toEnv.secretAccessKey, toEnv.releasesBucket, sourceTarballName + console.log('Fetching source code tarball...'.gray); + var cmd = util.format('s3cmd get --force -v --ssl --access_key="%s" --secret_key="%s" "s3://%s/%s" %s', + fromEnv.accessKeyId, fromEnv.secretAccessKey, fromEnv.releasesBucket, sourceTarballName, tmpFile ); + execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); + console.log('Uploading source code tarball...'.gray); + cmd = util.format('s3cmd put -v --ssl --add-header=x-amz-acl:authenticated-read --access_key="%s" --secret_key="%s" %s "s3://%s/%s"', + toEnv.accessKeyId, toEnv.secretAccessKey, tmpFile, toEnv.releasesBucket, sourceTarballName + ); execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); var strippedToReleases = stripUnreachable(toReleases); @@ -561,6 +565,8 @@ function stage(fromEnv, toEnv, stageVersion) { next: null }; + safe.fs.unlinkSync(tmpFile); + verifyAndUpload(toEnv, toReleases, function (error) { if (error) return exit(error); From 04e27496bdf3b43f6fea56af8ba7a8ae4df528ab Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 12 Oct 2015 10:52:55 -0700 Subject: [PATCH 140/234] remove tmpreaper (will use systemd for this) --- images/initializeBaseUbuntuImage.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9b543f25b..db81658d1 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -146,10 +146,6 @@ echo "==== Install collectd ===" 
apt-get install -y collectd collectd-utils update-rc.d -f collectd remove -echo "=== Install tmpreaper ===" -apt-get install -y tmpreaper -sed -e 's/SHOWWARNING=true/# SHOWWARNING=true/' -i /etc/tmpreaper.conf - echo "==== Extracting installer source ====" rm -rf "${INSTALLER_SOURCE_DIR}" && mkdir -p "${INSTALLER_SOURCE_DIR}" tar xvf /root/installer.tar -C "${INSTALLER_SOURCE_DIR}" && rm /root/installer.tar From 7fe1b02ccdef3544d5bc0c5cf11cdf99e8ab96d7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 12 Oct 2015 11:30:04 -0700 Subject: [PATCH 141/234] moved to release/ repo --- release/CHANGES | 233 -------------- release/parsechangelog.js | 31 -- release/release | 660 -------------------------------------- release/versions.json | 418 ------------------------ release/versionsformat.js | 71 ---- 5 files changed, 1413 deletions(-) delete mode 100644 release/CHANGES delete mode 100644 release/parsechangelog.js delete mode 100755 release/release delete mode 100644 release/versions.json delete mode 100644 release/versionsformat.js diff --git a/release/CHANGES b/release/CHANGES deleted file mode 100644 index f1ae0193e..000000000 --- a/release/CHANGES +++ /dev/null @@ -1,233 +0,0 @@ -[0.0.1] -- Hot Chocolate - -[0.0.2] -- Hotfix appstore ui in webadim - -[0.0.3] -- Tall Pike - -[0.0.4] -- This will be 0.0.4 changes - -[0.0.5] -- App install/configure route fixes - -[0.0.6] -- Not sure what happenned here - -[0.0.7] -- resetToken is now sent as part of create user -- Same as 0.0.7 which got released by mistake - -[0.0.8] -- Manifest changes - -[0.0.9] -- Fix app restore -- Fix backup issues - -[0.0.10] -- Unknown orchestra - -[0.0.11] -- Add ldap addon - -[0.0.12] -- Support OAuth2 state - -[0.0.13] -- Use docker image from cloudron repository - -[0.0.14] -- Improve setup flow - -[0.0.15] -- Improved Appstore view - -[0.0.16] -- Improved Backup approach - -[0.0.17] -- Upgrade testing -- App auto updates -- Usage graphs - -[0.0.18] -- Rework backups and updates 
- -[0.0.19] -- Graphite fixes -- Avatar and Cloudron name support - -[0.0.20] -- Apptask fixes -- Chrome related fixes - -[0.0.21] -- Increase nginx hostname size to 64 - -[0.0.22] -- Testing the e2e tests - -[0.0.23] -- Better error status page -- Fix updater and backup progress reporting -- New avatar set -- Improved setup wizard - -[0.0.24] -- Hotfix the ldap support - -[0.0.25] -- Add support page -- Really fix ldap issues - -[0.0.26] -- Add configurePath support - -[0.0.27] -- Improved log collector - -[0.0.28] -- Improve app feedback -- Restyle login page - -[0.0.29] -- Update to ubuntu 15.04 - -[0.0.30] -- Move to docker 1.7 - -[0.0.31] -- WARNING: This update restarts your containers -- System processes are prioritized over apps -- Add ldap group support - -[0.0.32] -- MySQL addon update - -[0.0.33] -- Fix graphs -- Fix MySQL 5.6 memory usage - -[0.0.34] -- Correctly mark apps pending for approval - -[0.0.35] -- Fix ldap admin group username - -[0.0.36] -- Fix restore without backup -- Optimize image deletion during updates -- Add memory accounting -- Restrict access to metadata from containers - -[0.0.37] -- Prepare for Selfhosting 1. 
part -- Use userData instead of provisioning calls - -[0.0.38] -- Account for Ext4 reserved block when partitioning disk - -[0.0.39] -- Move subdomain management to the cloudron - -[0.0.40] -- Add journal limit -- Fix reprovisioning on reboot -- Fix subdomain management during startup - -[0.0.41] -- Finally bring things to a sane state - -[0.0.42] -- Parallel apptask - -[0.0.43] -- Move to systemd - -[0.0.44] -- Fix apptask concurrency bug - -[0.0.45] -- Retry subdomain registration - -[0.0.46] -- Fix app update email notification - -[0.0.47] -- Ensure box code quits within 5 seconds - -[0.0.48] -- Styling fixes -- Improved session handling - -[0.0.49] -- Fix app autoupdate logic - -[0.0.50] -- Use domainmanagement via CaaS - -[0.0.51] -- Fix memory management - -[0.0.52] -- Restrict addons memory -- Get nofication about container OOMs - -[0.0.53] -- Restrict addons memory -- Get notification about container OOMs -- Add retry to subdomain logic - -[0.0.54] -- OAuth Proxy now uses internal port forwarding - -[0.0.55] -- Setup cloudron timezone based on droplet region - -[0.0.56] -- Use correct timezone in updater - -[0.0.57] -- Fix systemd logging issues - -[0.0.58] -- Ensure backups of failed apps are retained across archival cycles - -[0.0.59] -- Installer API fixes - -[0.0.60] -- Do full box backup on updates - -[0.0.61] -- Track update notifications to inform admin only once - -[0.0.62] -- Export bind dn and password from LDAP addon - -[0.0.63] -- Fix creation of TXT records - -[0.0.64] -- Stop apps in a retired cloudron -- Retry downloading application on failure - -[0.0.65] -- Do not send crash mails for apps in development - -[0.0.66] -- Readonly application and addon containers - -[0.0.67] -- Fix email notifications -- Fix bug when restoring from certain backups - -[0.0.68] -- Update graphite image -- Add simpleauth addon support - -[0.0.69] -- Support newer manifest format diff --git a/release/parsechangelog.js b/release/parsechangelog.js deleted file mode 
100644 index 19d9c6509..000000000 --- a/release/parsechangelog.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict'; - -var fs = require('fs'); - -exports = module.exports = { - parse: parse -}; - -function parse(version) { - var changelog = [ ]; - var lines = fs.readFileSync(__dirname + '/CHANGES', 'utf8').split('\n'); - for (var i = 0; i < lines.length; i++) { - if (lines[i] === '[' + version + ']') break; - } - - for (i = i + 1; i < lines.length; i++) { - if (lines[i] === '') continue; - if (lines[i][0] === '[') break; - - lines[i] = lines[i].trim(); - - // detect and remove list style - and * in changelog lines - if (lines[i].indexOf('-') === 0) lines[i] = lines[i].slice(1).trim(); - if (lines[i].indexOf('*') === 0) lines[i] = lines[i].slice(1).trim(); - - changelog.push(lines[i]); - } - - return changelog; -} - diff --git a/release/release b/release/release deleted file mode 100755 index 13dde4abc..000000000 --- a/release/release +++ /dev/null @@ -1,660 +0,0 @@ -#!/usr/bin/env node - -'use strict'; - -require('supererror')({ splatchError: true }); -require('colors'); - -var assert = require('assert'), - async = require('async'), - AWS = require('aws-sdk'), - execSync = require('child_process').execSync, - os = require('os'), - parseChangelog = require('./parsechangelog.js').parse, - path = require('path'), - postmark = require('postmark')(process.env.POSTMARK_API_KEY_TOOLS), - program = require('commander'), - safe = require('safetydance'), - semver = require('semver'), - superagent = require('superagent'), - Table = require('easy-table'), - url = require('url'), - util = require('util'), - versionsFormat = require('./versionsformat.js'), - yesno = require('yesno'), - _ = require('underscore'); - -var DIGITALOCEAN = 'https://api.digitalocean.com/v2'; - -var ENVIRONMENTS = { - 'dev': { - tag: 'dev', - url: 'https://s3.amazonaws.com/dev-cloudron-releases/versions.json', - accessKeyId: process.env.AWS_DEV_ACCESS_KEY, - secretAccessKey: process.env.AWS_DEV_SECRET_KEY, 
- releasesBucket: 'dev-cloudron-releases', - digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_DEV - }, - 'staging': { - tag: 'staging', - url: 'https://s3.amazonaws.com/staging-cloudron-releases/versions.json', - accessKeyId: process.env.AWS_STAGING_ACCESS_KEY, - secretAccessKey: process.env.AWS_STAGING_SECRET_KEY, - releasesBucket: 'staging-cloudron-releases', - digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_STAGING - }, - 'prod': { - tag: 'prod', - url: 'https://s3.amazonaws.com/prod-cloudron-releases/versions.json', - accessKeyId: process.env.AWS_PROD_ACCESS_KEY, - secretAccessKey: process.env.AWS_PROD_SECRET_KEY, - releasesBucket: 'prod-cloudron-releases', - digitalOceanToken: process.env.DIGITAL_OCEAN_TOKEN_PROD - } -}; - -function exit(error) { - if (error) console.error(error.message ? error.message.red : error); - - process.exit(error ? 1 : 0); -} - -function stripUnreachable(releases) { - var reachableVersions = [ ]; - var curVersion = '0.0.1'; - - while (true) { - reachableVersions.push(curVersion); - var nextVersion = releases[curVersion].next; - if (!nextVersion) break; - curVersion = nextVersion; - } - - return _.pick(releases, reachableVersions); -} - -function notifyAdmins(env, releases, callback) { - console.log('Notifying admins about new release'.gray); - - var sortedVersions = Object.keys(releases).sort(semver.compare); - var oldVersion = sortedVersions[sortedVersions.length - 2], - newVersion = sortedVersions[sortedVersions.length - 1]; - - var oldImageRef = releases[oldVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2], - newImageRef = releases[newVersion].imageName.match('box-(prod|staging|dev)-([0-9a-z.]+)-.*')[2]; - - var imageLogs = execSync(util.format('git fetch && git log %s..%s --format=oneline', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'), - imageStat = execSync(util.format('git diff --stat %s..%s', oldImageRef, newImageRef), { cwd: __dirname }).toString('utf8'); - - var oldBoxRef = 
url.parse(releases[oldVersion].sourceTarballUrl).path.match('/box-(.*).tar.gz')[1], - newBoxRef = url.parse(releases[newVersion].sourceTarballUrl).path.match('/box-(.*).tar.gz')[1]; - - var boxRepo = path.resolve(__dirname, '../../box'); - - var boxLogs = execSync(util.format('git fetch && git log %s..%s --format=oneline', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'), - boxStat = execSync(util.format('git diff --stat %s..%s', oldBoxRef, newBoxRef), { cwd: boxRepo }).toString('utf8'); - - var textBody = util.format( - 'A new box release was pushed by %s.\n\n' + - 'Image Changes\n' + - '-----------------\n' + - '%s\n\n%s\n\n' + - 'Box Changes\n' + - '-----------\n' + - '%s\n\n%s\n\n' + - 'Changelog\n' + - '---------\n' + - '%s\n\n' + - 'Release json\n' + - '------------\n' + - '%s\n\n' + - 'Regards,\n' + - 'Release team\n', - releases[newVersion].author, imageLogs, imageStat, boxLogs, boxStat, - releases[newVersion].changelog, JSON.stringify(releases[newVersion], null, 4)); - - postmark.send({ - 'From': 'no-reply@cloudron.io', - 'To': 'admin@cloudron.io', - 'Subject': util.format('[%s] New box release %s', env.tag, newVersion), - 'TextBody': textBody, - 'Tag': 'Important' - }, callback); -} - -function verifyAndUpload(env, releases, callback) { - assert.strictEqual(typeof env, 'object'); - assert.strictEqual(typeof releases, 'object'); - assert.strictEqual(typeof callback, 'function'); - - var s3 = new AWS.S3({ - accessKeyId: env.accessKeyId, - secretAccessKey: env.secretAccessKey - }); - - var error = versionsFormat.verify(releases); - if (error) return callback(error); - - s3.putObject({ - Bucket: env.releasesBucket, - Key: 'versions.json', - ACL: 'public-read', - Body: JSON.stringify(releases, null, 4), - ContentType: 'application/json' - }, function (error, data) { - if (error) return callback(error); - - console.log('Uploaded'.green); - - callback(null); - }); -} - -function newRelease(options) { - var env = ENVIRONMENTS[options.env]; - if (!env) 
exit(new Error(util.format('Unknown environment %s', options.env))); - - if (!options.file) exit(new Error('--file is required')); - - var contents = safe.fs.readFileSync(options.file, 'utf8'); - if (!contents) exit(safe.error); - - var releases = safe.JSON.parse(contents); - if (!releases) exit(new Error(options.file + ' has invalid json :' + safe.error.message)); - - verifyAndUpload(env, releases, exit); -} - -function edit(options) { - var env = ENVIRONMENTS[options.env]; - if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); - - superagent.get(env.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var oldContents = result.type === 'application/json' ? JSON.stringify(result.body, null, 4) : result.text; - var tmpfile = path.join(os.tmpdir(), 'versions.json'); - safe.fs.writeFileSync(tmpfile, oldContents); - - var editor = require('child_process').spawn(process.env.EDITOR || 'vim', [tmpfile], {stdio: 'inherit'}); - editor.on('error', exit); - editor.on('exit', function () { - var newContents = safe.fs.readFileSync(tmpfile, 'utf8'); - if (!newContents || newContents.trim().length === 0 || newContents === oldContents) return exit('Unchanged'); - - var releases = safe.JSON.parse(newContents); - if (!releases) exit(new Error(options.file + ' has invalid json :' + safe.error.message)); - - verifyAndUpload(env, releases, exit); - }); - }); -} - -function createRelease(options) { - var env = ENVIRONMENTS[options.env]; - if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); - - if (env.tag === 'prod') { - if (options.revert || options.rerelease || options.revert) return exit(new Error('operation is not allowed in prod')); - } - - if (!options.rerelease && !options.revert) { - if (!options.code && !options.image) exit(new Error('--code or --image is required')); - } - - if (options.image && !parseInt(options.image, 10)) exit('image must be a number'); - if 
(options.code && !safe.url.parse(options.code)) exit('code must be a valid url'); - - var username = execSync('git config user.name').toString('utf8').trim(); - var email = execSync('git config user.email').toString('utf8').trim(); - - superagent.get(env.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var releases = result.type === 'application/json' ? result.body : safe.JSON.parse(result.text); - - if (!releases) exit(new Error('versions.json is not valid JSON')); - - var strippedReleases = stripUnreachable(releases); - var lastReachableVersion = Object.keys(strippedReleases).sort(semver.rcompare)[0]; - - var sortedVersions = Object.keys(releases).sort(semver.rcompare); - var lastVersion = sortedVersions[0]; - - if (options.revert) { - var secondLastVersion = sortedVersions[1]; - - releases[secondLastVersion].next = null; - delete releases[lastVersion]; - - console.log('Reverting %s'.gray, lastVersion); - return verifyAndUpload(env, releases, exit); - } - - var newVersion = options.amend ? lastVersion : (options.version || semver.inc(lastVersion, 'patch')); - // guard against options.version being commander's version function. any command using this code path needs to explicitly clear the version - // this is the price to pay for using --version with commander - assert(semver.valid(newVersion)); - - releases[lastReachableVersion].next = newVersion; - - var newImageId = options.image ? 
parseInt(options.image, 10) : releases[lastReachableVersion].imageId; - var sourceTarballUrl = options.code || releases[lastReachableVersion].sourceTarballUrl; - var upgrade = options.upgrade || (releases[lastReachableVersion].imageId !== newImageId); - - // check if we have a changelog otherwise - var changelog = parseChangelog(newVersion); - if (changelog.length === 0) console.log('No changelog for version %s found.'.yellow, newVersion.bold); - - var url = DIGITALOCEAN + '/images/' + newImageId; - superagent.get(url).set('Authorization', 'Bearer ' + env.digitalOceanToken).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - releases[newVersion] = { - sourceTarballUrl: sourceTarballUrl, - imageId: newImageId, - imageName: result.body.image.name, - changelog: changelog, - upgrade: upgrade, - date: (new Date()).toISOString(), - author: username + ' <' + email + '>', - next: null - }; - - verifyAndUpload(env, releases, function (error) { - if (error) return exit(error); - - console.log('%s : %s', newVersion, JSON.stringify(releases[newVersion], null, 4)); - - exit(); - }); - }); - }); -} - -function listRelease(options) { - var env = ENVIRONMENTS[options.env]; - if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); - - var raw = !!options.raw; - - superagent.get(env.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - if (raw) { - console.log(JSON.stringify(result.body, null, 4)); - exit(null); - } - - console.log(''); - console.log('%s:'.gray, env.tag); - console.log(''); - - if (result.type !== 'application/json') { - console.log('The content type of the release file is %s. 
It should be application/json something might have gone wrong!'.red, result.type); - console.log('Trying to parse it anyway...'); - console.log(''); - result.body = safe.JSON.parse(result.text); - if (!result.body) { - console.log('Release file is not valid JSON!'.red); - exit(); - } - } - - if (Object.keys(result.body).length === 0) { - console.log('No releases'); - exit(null); - } - - var strippedReleases = stripUnreachable(result.body); - - var t = new Table(); - - for (var release in result.body) { - t.cell('Release', release in strippedReleases ? release.white : release.gray); - t.cell('Image ID', result.body[release].imageId + (result.body[release].upgrade ? '*' : '')); - t.cell('Image Name', result.body[release].imageName.match(/box-(prod|staging|dev)-([\w]+)-.*/)[2]); - t.cell('Date', result.body[release].date); - t.cell('Author', result.body[release].author.split(' ')[0]); - t.cell('Next', result.body[release].next); - t.cell('Source', result.body[release].sourceTarballUrl.match(/\/box-(.*).tar.gz/)[1].slice(0, 7)); - t.newRow(); - } - - console.log(t.toString()); - - exit(null); - }); -} - -function touchRelease(options, callback) { - var env = ENVIRONMENTS[options.env]; - if (!env) exit(new Error(util.format('Unknown environment %s', options.env))); - - superagent.get(env.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var strippedReleases = stripUnreachable(result.body); - - var latestVersion = Object.keys(strippedReleases).sort(semver.rcompare)[0]; - result.body[latestVersion].date = (new Date()).toISOString(); - - verifyAndUpload(env, result.body, exit); - }); -} - -function listImages(token, callback) { - var images = []; - var nextPage = DIGITALOCEAN + '/images?private=true'; - - async.doWhilst(function (callback) { - superagent.get(nextPage).set('Authorization', 'Bearer ' + token).end(function (error, result) { - if (error || result.error) return callback(error || result.error); - - nextPage 
= (result.body.links && result.body.links.pages && nextPage !== result.body.links.pages.next) ? result.body.links.pages.next : null; - images = images.concat(result.body.images); - - callback(null); - }); - }, function () { return !!nextPage; }, function (error) { - if (error) return callback(error); - callback(null, images); - }); -} - -function sync(options) { - var destEnv = ENVIRONMENTS[options.env]; - if (!destEnv) exit(new Error(util.format('Unknown environment %s', options.env))); - - var sourceEnv; - - if (destEnv.tag === 'staging') sourceEnv = ENVIRONMENTS['prod']; - else if (destEnv.tag === 'dev') sourceEnv = ENVIRONMENTS['staging']; - else exit('Unable to determine source environment to sync from'); - - console.log('Syncing %s to %s', sourceEnv.tag.cyan.bold, destEnv.tag.cyan.bold); - - var S3 = new AWS.S3({ - accessKeyId: destEnv.accessKeyId, - secretAccessKey: destEnv.secretAccessKey - }); - - superagent.get(sourceEnv.url).end(function (error, result) { - if (error || result.error) exit(error || result.error); - - var sourceReleases = result.body; - var destReleases = {}; - - var params = { - Bucket: destEnv.releasesBucket, - Prefix: 'box-' - }; - - S3.listObjects(params, function(error, data) { - if (error) exit(error); - - var devSourceTarballs = data.Contents; - - listImages(destEnv.digitalOceanToken, function (error, images) { - if (error) exit(error); - - for (var release in sourceReleases) { - var match = sourceReleases[release].imageName.match(/box-(?:prod|staging|dev)-(.*)-\d\d\d\d-\d\d-\d\d/); - if (!match || !match[1]) exit('Unable to parse image name %s of release %s.', sourceReleases[release].imageName, release); - - var sourceImageRevision = match[1]; - - // find a suitable image and sourceTarballUrl on dev - var suitableImage = null; - var suitableSourceTarball = null; - - images.forEach(function (image) { - if (image.name.indexOf(util.format('box-%s-%s', destEnv.tag, sourceImageRevision)) === 0) { - suitableImage = image; - } - }); - - 
devSourceTarballs.forEach(function (tarball) { - if (sourceReleases[release].sourceTarballUrl.indexOf(tarball.Key) !== -1) { - suitableSourceTarball = 'https://' + destEnv.releasesBucket + '.s3.amazonaws.com/' + tarball.Key; - } - }); - - if (!suitableImage) { - console.log('Unable to find a suitable image on %s for release %s.', destEnv.tag, release); - console.log('Required image revision is %s', sourceImageRevision); - process.exit(1); - } - - if (!suitableSourceTarball) { - console.log('Unable to find a suitable source tarball on %s for release %s.', destEnv.tag, release); - console.log('Required source tarball is %s', sourceReleases[release].sourceTarballUrl.slice(sourceReleases[release].sourceTarballUrl.lastIndexOf('/') + 1)); - process.exit(1); - } - - destReleases[release] = { - sourceTarballUrl: suitableSourceTarball, - imageId: suitableImage.id, - imageName: suitableImage.name, - changelog: sourceReleases[release].changelog, - upgrade: sourceReleases[release].upgrade, - date: sourceReleases[release].date, - author: sourceReleases[release].author, - next: sourceReleases[release].next - }; - } - - console.log('Potential %s release file:', destEnv.tag); - console.log(''); - console.log(destReleases); - console.log(''); - - yesno.ask('Do you want to upload that release file? 
[y/N]', false, function (ok) { - if (!ok) process.exit(1); - - var params = { - Bucket: destEnv.releasesBucket, - Key: 'versions.json', - ACL: 'public-read', - Body: JSON.stringify(destReleases, null, 4), - ContentType: 'application/json' - }; - - S3.putObject(params, function(error, data) { - if (error) { - console.error(error); - process.exit(1); - } - - console.log('Upload successful.'); - process.exit(0); - }); - }); - }); - }); - }); -} - -function getImageByRevision(env, revision, callback) { - assert.strictEqual(typeof revision, 'string'); - assert.strictEqual(typeof callback, 'function'); - - var url = DIGITALOCEAN + '/images?per_page=100'; - superagent.get(url).set('Authorization', 'Bearer ' + env.digitalOceanToken).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var images = result.body.images; - for (var i = 0; i < images.length; i++) { - if (images[i].name.indexOf('box-' + env.tag + '-' + revision) === 0) return callback(null, images[i]); - } - - callback(new Error('No image for ' + revision)); - }); -} - -function stage(fromEnv, toEnv, stageVersion) { - var username = execSync('git config user.name').toString('utf8').trim(); - var email = execSync('git config user.email').toString('utf8').trim(); - - console.log('Staging from %s -> %s'.gray, fromEnv.tag, toEnv.tag); - - superagent.get(fromEnv.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var fromReleases = result.type === 'application/json' ? result.body : safe.JSON.parse(result.text); - if (!fromReleases) exit(new Error('versions.json is not valid JSON')); - - superagent.get(toEnv.url).end(function (error, result) { - if (error || result.error) return exit(error || result.error); - - var toReleases = result.type === 'application/json' ? 
result.body : safe.JSON.parse(result.text); - if (!toReleases) exit(new Error('versions.json is not valid JSON')); - - var latestFromVersion = Object.keys(fromReleases).sort(semver.rcompare)[0]; - var latestToVersion = Object.keys(toReleases).sort(semver.rcompare)[0]; - var nextVersion = stageVersion ||latestFromVersion; // dev and staging are assumed to be 'synced' - - console.log('Releasing version %s to %s'.gray, nextVersion , toEnv.tag); - - // check if we even have a new version to stage - if (latestFromVersion === latestToVersion) exit(util.format('No new version on %s to stage.', fromEnv.tag)); - - // check if we have a changelog - var changelog = parseChangelog(nextVersion); - if (changelog.length === 0) exit(new Error('No changelog found for version ' + nextVersion)); - - var strippedFromReleases = stripUnreachable(fromReleases); - var latestReachableFromVersion = Object.keys(strippedFromReleases).sort(semver.rcompare)[0]; - var latestFromImageName = fromReleases[latestReachableFromVersion].imageName; - var latestFromImageRevision = new RegExp('box-' + fromEnv.tag + '-([a-z,0-9.]+)-.*').exec(latestFromImageName)[1]; - - if (!latestFromImageRevision) exit('Unable to determine image revision'); - - getImageByRevision(toEnv, latestFromImageRevision, function (error, toImage) { - if (error) return exit(error); - - var sourceTarballName = url.parse(fromReleases[latestReachableFromVersion].sourceTarballUrl).pathname.substr(1); - var upgrade = fromReleases[latestReachableFromVersion].upgrade; - var tmpFile = '/tmp/' + sourceTarballName; - - console.log('Copying source code tarball %s to %s'.gray, sourceTarballName, toEnv.tag); - - console.log('Fetching source code tarball...'.gray); - var cmd = util.format('s3cmd get --force -v --ssl --access_key="%s" --secret_key="%s" "s3://%s/%s" %s', - fromEnv.accessKeyId, fromEnv.secretAccessKey, fromEnv.releasesBucket, sourceTarballName, tmpFile - ); - execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); - - 
console.log('Uploading source code tarball...'.gray); - cmd = util.format('s3cmd put -v --ssl --add-header=x-amz-acl:authenticated-read --access_key="%s" --secret_key="%s" %s "s3://%s/%s"', - toEnv.accessKeyId, toEnv.secretAccessKey, tmpFile, toEnv.releasesBucket, sourceTarballName - ); - execSync(cmd, { stdio: [ null, process.stdout, process.stderr ] } ); - - var strippedToReleases = stripUnreachable(toReleases); - var latestReachableToVersion = Object.keys(strippedToReleases).sort(semver.rcompare)[0]; - toReleases[latestReachableToVersion].next = nextVersion; - toReleases[nextVersion] = { - imageId: toImage.id, - imageName: toImage.name, - changelog: changelog, - upgrade: upgrade, - date: (new Date()).toISOString(), - sourceTarballUrl: 'https://' + toEnv.releasesBucket + '.s3.amazonaws.com/' + sourceTarballName, - author: username + ' <' + email + '>', - next: null - }; - - safe.fs.unlinkSync(tmpFile); - - verifyAndUpload(toEnv, toReleases, function (error) { - if (error) return exit(error); - - console.log('%s : %s', nextVersion, JSON.stringify(toReleases[nextVersion], null, 4)); - - notifyAdmins(toEnv, toReleases, exit); - }); - }); - }); - }); -} - -program.command('amend') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--code ', 'Source code url') - .option('--image ', 'Image id') - .option('--changelog ', 'Changelog') - .option('--upgrade', 'Set the upgrade flag') - .description('Amend last release. 
Use with care') - .action(function (options) { options.amend = true; createRelease(options); }); - -program.command('create') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--code ', 'Source code url') - .option('--image ', 'Image id') - .option('--changelog ', 'Changelog') - .option('--upgrade', 'Set the upgrade flag') - .option('--version ', 'Create the specified version', null) - .description('Create a new release') - .action(createRelease); - -program.command('edit') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('Edit and upload versions.json') - .action(edit); - -program.command('list') - .option('--raw', 'Show raw json') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('List the releases file') - .action(listRelease); - -program.command('new') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--file ', 'Upload file as versions.json') - .description('Upload a new versions.json') - .action(newRelease); - -program.command('publish') - .description('Publish latest staging version to production') - .option('--version ', 'Version to publish', null) - .action(function (options) { stage(ENVIRONMENTS['staging'], ENVIRONMENTS['prod'], options.version); }); - -program.command('rerelease') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .option('--version ', 'Create the specified version', null) - .description('Make a new release, same as the last release') - .action(function (options) { options.rerelease = true; createRelease(options); }); - -program.command('revert') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('Revert the last release. 
Use with care') - .action(function (options) { options.revert = true; createRelease(options); }); - -program.command('stage') - .description('Stage latest dev version to staging') - .option('--version ', 'Version to publish', null) - .action(function (options) { stage(ENVIRONMENTS['dev'], ENVIRONMENTS['staging'], options.version); }); - -program.command('sync') - .option('--env ', 'Environment (dev/staging)', 'dev') - .description('Sync the specified env with the parent env (prod -> staging or staging -> dev)') - .action(sync); - -program.command('touch') - .option('--env ', 'Environment (dev/staging/prod)', 'dev') - .description('Touch the releases file') - .action(touchRelease); - -program.parse(process.argv); - -if (!process.argv.slice(2).length) { - program.outputHelp(); -} else { // https://github.com/tj/commander.js/issues/338 - var knownCommand = program.commands.some(function (command) { return command._name === process.argv[2]; }); - if (!knownCommand) { - console.error('Unknown command: ' + process.argv[2]); - process.exit(1); - } -} - diff --git a/release/versions.json b/release/versions.json deleted file mode 100644 index b12a84030..000000000 --- a/release/versions.json +++ /dev/null @@ -1,418 +0,0 @@ -{ - "0.0.1": { - "sourceTarballUrl": "https://s3.amazonaws.com/cloudron-releases/box-5b369d2b78605140be63c8c2dc3e4af1ea6ae17b.tar.gz", - "imageId": 10504128, - "imageName": "box-e5d4524-2015-02-06-172850", - "changelog": [ - "Hot Chocolate" - ], - "upgrade": false, - "date": "Fri Feb 6 17:25:45 UTC 2015", - "next": "0.0.2" - }, - "0.0.2": { - "sourceTarballUrl": "https://s3.amazonaws.com/cloudron-releases/box-f2b6340c32c29e5e265abcd7044433d68ac0024c.tar.gz", - "imageId": 10504128, - "imageName": "box-e5d4524-2015-02-06-172850", - "changelog": [ - "Hotfix appstore ui in webadim" - ], - "upgrade": false, - "date": "Fri Feb 6 19:13:26 UTC 2015", - "next": "0.0.3" - }, - "0.0.3": { - "sourceTarballUrl": 
"https://s3.amazonaws.com/staging-cloudron-releases/box-20e43bdf9c6cf40d3412c59750bc43e834ec39d3.tar.gz", - "imageId": 10621904, - "imageName": "box-8c16ea0-2015-02-12-154005", - "changelog": [ - "Tall Pike" - ], - "upgrade": true, - "date": "Fri Feb 13 00:40:22 UTC 2015", - "next": "0.0.4" - }, - "0.0.4": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-20e43bdf9c6cf40d3412c59750bc43e834ec39d3.tar.gz", - "imageId": 10624164, - "imageName": "box-0ec7efa-2015-02-12-181028", - "changelog": [ - "Ghost release" - ], - "upgrade": false, - "date": "Tue Feb 17 18:03:31 UTC 2015", - "next": "0.0.5" - }, - "0.0.5": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-94383e98591934b648713eccfa67a3f7bbaf659b.tar.gz", - "imageId": 10694830, - "imageName": "box-24cfd4d-2015-02-18-140547", - "changelog": [ - "Banana Smoothie" - ], - "upgrade": true, - "date": "Thu Feb 19 00:13:35 UTC 2015", - "next": "0.0.6" - }, - "0.0.6": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-00dbdddce752d454a6a37b3b15eff9f24b0d8882.tar.gz", - "imageId": 10787693, - "imageName": "box-d7e153f-2015-02-25-192418", - "changelog": [ - "Chai Latte" - ], - "upgrade": true, - "date": "Thu Feb 26 04:19:48 UTC 2015", - "next": "0.0.7" - }, - "0.0.7": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-00dbdddce752d454a6a37b3b15eff9f24b0d8882.tar.gz", - "imageId": 10787693, - "imageName": "box-d7e153f-2015-02-25-192418", - "changelog": [ - "Rerelease for updating SSL certificates" - ], - "upgrade": true, - "date": "Fri Feb 27 07:49:36 UTC 2015", - "next": "0.0.8" - }, - "0.0.8": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0b5b4535de027a0abccd823b75b4937ec4926d6c.tar.gz", - "imageId": 10881993, - "imageName": "box-3ad90f0-2015-03-04-155817", - "changelog": [ - "Orange Pekoe", - "It's all coming together!" 
- ], - "upgrade": true, - "date": "Thu Mar 5 00:23:34 UTC 2015", - "next": "0.0.9" - }, - "0.0.9": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-bbe982f14f861b39636ab37072d4e3b3c44a55ac.tar.gz", - "imageId": 10881993, - "imageName": "box-3ad90f0-2015-03-04-155817", - "changelog": [ - "Kashayam" - ], - "upgrade": false, - "date": "Mon Mar 9 23:21:42 UTC 2015", - "next": "0.0.10" - }, - "0.0.10": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-5aac5bd56fe8b917b198bdf7ec4b4bbe231e292c.tar.gz", - "imageId": 10881993, - "imageName": "box-3ad90f0-2015-03-04-155817", - "changelog": [ - "Hot fix for GitLab" - ], - "upgrade": false, - "date": "Tue Mar 10 02:40:53 UTC 2015", - "next": "0.0.11" - }, - "0.0.11": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-b50692fc670ef6ab1bd35d14076cf20fa48cf002.tar.gz", - "imageId": 10881993, - "imageName": "box-3ad90f0-2015-03-04-155817", - "changelog": [ - "Fix app updates" - ], - "upgrade": false, - "date": "Tue Mar 10 18:44:55 UTC 2015", - "next": "0.0.12" - }, - "0.0.12": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-062f94b335e8b57caf9ec4780402f023297fc1b7.tar.gz", - "imageId": 11055383, - "imageName": "box-4e04584-2015-03-17-161439", - "changelog": [ - "Port binding fixes" - ], - "upgrade": true, - "date": "Tue Mar 17 23:35:01 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.13" - }, - "0.0.13": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-ab0f9f691192c735825bb8aa04c1d246b22b067b.tar.gz", - "imageId": 11055383, - "imageName": "box-4e04584-2015-03-17-161439", - "changelog": [ - "Implement App ids" - ], - "upgrade": false, - "date": "Mon Mar 23 02:50:53 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.14" - }, - "0.0.14": { - "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-4cabe44e998be028a86293827c58685f66ae2412.tar.gz", - "imageId": 11055383, - "imageName": "box-4e04584-2015-03-17-161439", - "changelog": [ - "Fix App updates" - ], - "upgrade": false, - "date": "Mon Mar 23 04:50:42 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.15" - }, - "0.0.15": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d19af1ff6aa47cf2053b9e279078e0421579be57.tar.gz", - "imageId": 11055383, - "imageName": "box-4e04584-2015-03-17-161439", - "changelog": [ - "Fix manifest format" - ], - "upgrade": false, - "date": "Mon Mar 23 05:53:05 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.16" - }, - "0.0.16": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d19af1ff6aa47cf2053b9e279078e0421579be57.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Image upgrade with newer addons" - ], - "upgrade": false, - "date": "Wed Mar 25 19:33:45 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.17" - }, - "0.0.17": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-5c1fb62adb35fc311565eb6495dc2985cfc6dc3d.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Subdomain API changes" - ], - "upgrade": false, - "date": "Wed Mar 25 19:41:28 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.18" - }, - "0.0.18": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d609c0b3052422813a03a68403c4def47f2ffcba.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Reorg app data" - ], - "upgrade": false, - "date": "Wed Mar 25 20:41:25 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.19" - }, - "0.0.19": { - "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-9b38e9a6f2e2abad725fedd4f78323619661ed55.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Delegate app dir creation" - ], - "upgrade": false, - "date": "Thu Mar 26 03:10:53 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.20" - }, - "0.0.20": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-b1381263d3e243eb986d4d9cbf7d2853171941bf.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Change backup strategy" - ], - "upgrade": false, - "date": "Wed Apr 1 08:18:50 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.21" - }, - "0.0.21": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0db08cf3efc8345f90d41203cc18f747f42499fa.tar.gz", - "imageId": 11162711, - "imageName": "box-e34c6ce-2015-03-25-121127", - "changelog": [ - "Remove hacks for backup migration" - ], - "upgrade": false, - "date": "Wed Apr 1 08:38:52 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.22" - }, - "0.0.22": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-9cbaef5b41df6a9f5c597448427fe706d2fb0221.tar.gz", - "imageId": 11282641, - "imageName": "box-28a9001-2015-04-02-193801", - "changelog": [ - "Backup fixes" - ], - "upgrade": true, - "date": "Fri Apr 3 02:58:35 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.23" - }, - "0.0.23": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-038ae04f9edbe5727931eace1a62b083c6cebd79.tar.gz", - "imageId": 11384175, - "imageName": "box-1c48a4b-2015-04-09-231714", - "changelog": [ - "Polish, polish, polish" - ], - "upgrade": true, - "date": "Fri Apr 10 06:46:56 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.24" - }, - "0.0.24": { - "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-a168846a056e03f5804cc7b0de8bf9438aa0a4a5.tar.gz", - "imageId": 11390303, - "imageName": "box-94f1086-2015-04-10-174805", - "changelog": [ - "Upgrade all apps and containers" - ], - "upgrade": true, - "date": "Sat Apr 11 01:10:31 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.25" - }, - "0.0.25": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-0a82fcb15f060aa53c1ec0f767d3e56f707416aa.tar.gz", - "imageId": 11390303, - "imageName": "box-94f1086-2015-04-10-174805", - "changelog": [ - "Fix backup creation issue caused by execFile buffer overflow" - ], - "upgrade": false, - "date": "Mon Apr 13 05:00:04 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.26" - }, - "0.0.26": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-d8ac0330e8c9821c5094d7ba985cbece963489c1.tar.gz", - "imageId": 11390303, - "imageName": "box-94f1086-2015-04-10-174805", - "changelog": [ - "Update manifestformat" - ], - "upgrade": false, - "date": "Mon Apr 13 19:54:17 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.27" - }, - "0.0.27": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-c9b7d9a0181911fce6cbc79f5cc965affa2ab2ae.tar.gz", - "imageId": 11390303, - "imageName": "box-94f1086-2015-04-10-174805", - "changelog": [ - "_docker", - "Rickshaw inclusion" - ], - "upgrade": false, - "date": "Fri Apr 17 16:37:25 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.28" - }, - "0.0.28": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-c9b7d9a0181911fce6cbc79f5cc965affa2ab2ae.tar.gz", - "imageId": 11463691, - "imageName": "box-f620aed-2015-04-17-090956", - "changelog": [ - "docker image cleanup" - ], - "upgrade": true, - "date": "Fri Apr 17 16:40:53 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.29" - }, - "0.0.29": { - "sourceTarballUrl": 
"https://staging-cloudron-releases.s3.amazonaws.com/box-b22136f11b0b7f51f718aad18904725a6f5c95db.tar.gz", - "imageId": 11463691, - "imageName": "box-f620aed-2015-04-17-090956", - "changelog": [ - "Fix app states" - ], - "upgrade": false, - "date": "Tue Apr 21 17:27:00 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.30" - }, - "0.0.30": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-73de1a7c4e422e4f320b8e8c336e06b48eee1241.tar.gz", - "imageId": 11535902, - "imageName": "box-abaa2f6-2015-04-22-115250", - "changelog": [ - "Fix nginx and collectd configuration setup", - "Foundation for updating box without app restart" - ], - "upgrade": true, - "date": "Wed Apr 22 21:44:00 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.31" - }, - "0.0.31": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-a36d6d864f8905706b94bf7f0c3e50ae0ae857f4.tar.gz", - "imageId": 11535902, - "imageName": "box-abaa2f6-2015-04-22-115250", - "changelog": [ - "Do not restart apps for box update" - ], - "upgrade": false, - "date": "Thu Apr 23 02:51:43 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.32" - }, - "0.0.32": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-69f9bee6777db74416530aba09541243d215194e.tar.gz", - "imageId": 11535902, - "imageName": "box-abaa2f6-2015-04-22-115250", - "changelog": [ - "Fix nginx templating", - "Implement infrastructure versioning" - ], - "upgrade": false, - "date": "Fri Apr 24 18:21:42 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.33" - }, - "0.0.33": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-11aecac6fde56f2289d711e4dee60baa1ccf4d00.tar.gz", - "imageId": 11567620, - "imageName": "box-e877472-2015-04-24-160626", - "changelog": [ - "Fix bug in retire", - "Fix non-infra upgrade" - ], - "upgrade": true, - "date": "Sat Apr 25 01:43:44 UTC 2015", - "author": "Girish Ramakrishnan ", - 
"next": "0.0.34" - }, - "0.0.34": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-88bbe86000066eb264c2fa00a55d3b9ddd3e50dd.tar.gz", - "imageId": 11567620, - "imageName": "box-e877472-2015-04-24-160626", - "changelog": [ - "Fix addons ownership", - "Fix backup for upgrades" - ], - "upgrade": false, - "date": "Sat Apr 25 03:34:48 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": "0.0.35" - }, - "0.0.35": { - "sourceTarballUrl": "https://staging-cloudron-releases.s3.amazonaws.com/box-7b7df404b7e7463853d440ba07becceac49c1888.tar.gz", - "imageId": 11876727, - "imageName": "box-cc1f57c-2015-05-15-200507", - "changelog": [ - "Change restart policy of containers to always", - "WARNING! This is an upgrade your cloudron will restart" - ], - "upgrade": true, - "date": "Fri May 15 17:17:40 UTC 2015", - "author": "Girish Ramakrishnan ", - "next": null - } -} diff --git a/release/versionsformat.js b/release/versionsformat.js deleted file mode 100644 index 1cb91dca5..000000000 --- a/release/versionsformat.js +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env node - -'use strict'; - -var fs = require('fs'), - safe = require('safetydance'), - semver = require('semver'), - util = require('util'), - url = require('url'); - -exports = module.exports = { - verifyFile: verifyFile, - verify: verify -}; - -function verify(versionsJson) { - if (!versionsJson || typeof versionsJson !== 'object') return new Error('versions must be valid object'); - - // check all the keys - var sortedVersions = Object.keys(versionsJson).sort(semver.compare); - for (var i = 0; i < sortedVersions.length; i++) { - var version = sortedVersions[i]; - var versionInfo = versionsJson[version]; - - if (typeof versionInfo.imageId !== 'number') return new Error('version ' + version + ' does not have proper imageId'); - - if (typeof versionInfo.imageName !== 'string' || !versionInfo.imageName.length) return new Error('version ' + version + ' does not have proper imageName'); - - if 
('changeLog' in versionsJson[version] && !util.isArray(versionInfo.changeLog)) return new Error('version ' + version + ' does not have proper changeLog'); - - if (typeof versionInfo.date !== 'string' || ((new Date(versionInfo.date)).toString() === 'Invalid Date')) return new Error('invalid date or missing date'); - - if (versionInfo.next !== null) { - if (typeof versionInfo.next !== 'string') return new Error('version ' + version + ' does not have "string" next'); - if (!semver.valid(versionInfo.next)) return new Error('version ' + version + ' has non-semver next'); - if (!(versionInfo.next in versionsJson)) return new Error('version ' + version + ' points to non-existent version'); - } - - if (typeof versionInfo.sourceTarballUrl !== 'string') return new Error('version ' + version + ' does not have proper sourceTarballUrl'); - - if ('author' in versionsJson[version] && typeof versionInfo.author !== 'string') return new Error('author must be a string'); - - var tarballUrl = url.parse(versionInfo.sourceTarballUrl); - if (tarballUrl.protocol !== 'https:') return new Error('sourceTarballUrl must be https'); - if (!/.tar.gz$/.test(tarballUrl.path)) return new Error('sourceTarballUrl must be tar.gz'); - - var nextVersion = versionInfo.next; - // despite having the 'next' field, the appstore code currently relies on all versions being sorted based on semver.compare (see boxversions.js) - if (nextVersion && semver.gt(version, nextVersion)) return new Error('next version cannot be less than current @' + version); - } - - // check that package.json version is in versions.json - var currentVersion = require('../package.json').version; - if (sortedVersions.indexOf(currentVersion) === -1) { - return new Error('package.json version is not present in versions.json'); - } - - return null; -} - -function verifyFile(versionsFileName) { - // check if the json is valid - var versions = safe.JSON.parse(fs.readFileSync(versionsFileName)); - if (!versions) { - return new 
Error(versionsFileName + ' is not valid json : ' + safe.error); - } - - return verify(versions); -} - - From a2e2d70660a9d66a2ed36dfdfc5bd99437673501 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 12 Oct 2015 11:34:25 -0700 Subject: [PATCH 142/234] remove deps of the release tool --- package.json | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/package.json b/package.json index 0b26ec60e..3f6abe5df 100644 --- a/package.json +++ b/package.json @@ -25,22 +25,11 @@ "superagent": "^0.21.0" }, "devDependencies": { - "aws-sdk": "^2.1.10", - "colors": "^1.0.3", - "commander": "^2.6.0", - "easy-table": "^0.3.0", "expect.js": "^0.3.1", "istanbul": "^0.3.5", "lodash": "^3.2.0", "mocha": "^2.1.0", - "nock": "^0.59.1", - "postmark": "^1.0.0", - "readline-sync": "^0.8.0", - "semver": "^4.3.0", - "ssh2": "^0.4.6", - "supererror": "^0.6.0", - "underscore": "^1.8.3", - "yesno": "0.0.1" + "nock": "^0.59.1" }, "scripts": { "test": "NODE_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test", From 31125dedc8f0da0b36eeb9b7804c04948df14897 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 12 Oct 2015 11:36:46 -0700 Subject: [PATCH 143/234] add deps of image tooling --- package.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index 3f6abe5df..8bec54bec 100644 --- a/package.json +++ b/package.json @@ -25,11 +25,16 @@ "superagent": "^0.21.0" }, "devDependencies": { + "async": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", + "commander": "^2.8.1", "expect.js": "^0.3.1", "istanbul": "^0.3.5", "lodash": "^3.2.0", "mocha": "^2.1.0", - "nock": "^0.59.1" + "nock": "^0.59.1", + "superagent": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", + "supererror": "^0.7.0", + "yesno": "0.0.1" }, "scripts": { "test": "NODE_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec 
./src/test", From 2cdf73adab83b4b3dadd1c95945d7a4f40011315 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 13 Oct 2015 11:59:28 -0700 Subject: [PATCH 144/234] Use 20m of logs per app --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index db81658d1..bec37f39a 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -87,7 +87,7 @@ mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" mkdir -p "${USER_DATA_DIR}/docker" -sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker," -i /lib/systemd/system/docker.service +sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker --log-driver=json-file --log-opt max-file=4 --log-opt max-size=5m," -i /lib/systemd/system/docker.service systemctl enable docker # give docker sometime to start up and create iptables rules From fe0c1745e1b517e5b6a1f54c4303799d4b31ece2 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 14 Oct 2015 15:08:38 -0700 Subject: [PATCH 145/234] make it explicit that logrotate is run via cron in base system --- images/initializeBaseUbuntuImage.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index bec37f39a..de937e19e 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -146,6 +146,11 @@ echo "==== Install collectd ===" apt-get install -y collectd collectd-utils update-rc.d -f collectd remove +# this simply makes it explicit that we run logrotate via cron. 
it's already part of base ubuntu +echo "==== Install logrotate ===" +apt-get install -y cron logrotate +systemctl enable cron + echo "==== Extracting installer source ====" rm -rf "${INSTALLER_SOURCE_DIR}" && mkdir -p "${INSTALLER_SOURCE_DIR}" tar xvf /root/installer.tar -C "${INSTALLER_SOURCE_DIR}" && rm /root/installer.tar From 6cf0554e23a961ccdfceb85d6e4f2760a4590c22 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Fri, 30 Oct 2015 17:40:17 -0700 Subject: [PATCH 146/234] do not resolve to a tag --- images/createDigitalOceanImage.sh | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 4561c6f74..00a675f20 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -65,19 +65,8 @@ fi function get_pretty_revision() { local git_rev="$1" local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null) - local name=$(git name-rev --name-only --tags "${sha1}" 2>/dev/null) - if [[ -z "${name}" ]]; then - echo "Unable to resolve $1" - exit 1 - fi - - # fallback to sha1 if we cannot find a tag - if [[ "${name}" == "undefined" ]]; then - echo "${sha1}" - else - echo "${name}" - fi + echo "${sha1}" } now=$(date "+%Y-%m-%d-%H%M%S") From 71faa5f89e3973c453908b5cfebf9d5da2b1bbd2 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sun, 1 Nov 2015 08:46:28 -0800 Subject: [PATCH 147/234] Move ssh to port 202 --- images/createDigitalOceanImage.sh | 20 +++++++++++++------- images/initializeBaseUbuntuImage.sh | 18 +++++++++++++++--- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 00a675f20..7abbcc09e 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -11,6 +11,12 @@ readonly INSTALLER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. 
&& pwd)" readonly JSON="${INSTALLER_DIR}/node_modules/.bin/json" readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent" +readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" + +readonly ssh202="ssh -p 202 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly ssh22="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" + installer_revision=$(git rev-parse HEAD) box_size="512mb" image_regions=(sfo1 ams3) @@ -120,7 +126,7 @@ done while true; do echo "Trying to copy init script to droplet" - if scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${droplet_ip}:.; then + if $scp22 "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${droplet_ip}:.; then break fi echo "Timedout, trying again in 30 seconds" @@ -128,24 +134,24 @@ while true; do done echo "Copying INFRA_VERSION" -scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${SCRIPT_DIR}/../../box/setup/INFRA_VERSION" root@${droplet_ip}:. +$scp22 "${SCRIPT_DIR}/../../box/setup/INFRA_VERSION" root@${droplet_ip}:. echo "Copying installer source" cd "${INSTALLER_DIR}" -git archive --format=tar HEAD | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "cat - > /root/installer.tar" +git archive --format=tar HEAD | $ssh22 "root@${droplet_ip}" "cat - > /root/installer.tar" echo "Executing init script" -if ! ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then +if ! 
$ssh22 "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then echo "Init script failed" exit 1 fi echo "Copy over certs" -scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" -scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +$scp202 -r "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +$scp202 "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" echo "Shutting down droplet with id : ${droplet_id}" -ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "${ssh_keys}" "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail +$ssh202 "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail # wait 10 secs for actual shutdown echo "Waiting for 10 seconds for droplet to shutdown" diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index de937e19e..cc3adebb9 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -39,8 +39,7 @@ iptables -P OUTPUT ACCEPT # NOTE: keep these in sync with src/apps.js validatePortBindings # allow ssh, http, https, ping, dns iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT -iptables -A INPUT -p tcp --dport 22 -j ACCEPT -iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,443,886 -j ACCEPT +iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,202,443,886 -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT iptables 
-A INPUT -p udp --sport 53 -j ACCEPT @@ -63,7 +62,7 @@ iptables -A INPUT -j LOGGING # last rule in INPUT chain iptables -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7 iptables -A LOGGING -j DROP -echo "==== Install btrfs tools" +echo "==== Install btrfs tools ===" apt-get -y install btrfs-tools echo "==== Install docker ====" @@ -232,3 +231,16 @@ sync sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf timedatectl set-ntp 1 timedatectl set-timezone UTC + +echo "==== Install ssh ===" +apt-get -y install openssh-server +# https://stackoverflow.com/questions/4348166/using-with-sed on why ? must be escaped +sed -e 's/^#\?Port .*/Port 202/g' \ + -e 's/^#\?PermitRootLogin .*/PermitRootLogin without-password/g' \ + -e 's/^#\?PermitEmptyPasswords .*/PermitEmptyPasswords no/g' \ + -e 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/g' \ + -i /etc/ssh/sshd_config + + # required so we can connect to this machine since port 22 is blocked by iptables by now +systemctl reload sshd + From 51a0ad70aa3c3d07bd91d614e14cbb14db952305 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 2 Nov 2015 08:54:10 -0800 Subject: [PATCH 148/234] log to journald instead --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index cc3adebb9..b6f9e6be6 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -86,7 +86,7 @@ mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" mkdir -p "${USER_DATA_DIR}/docker" -sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker --log-driver=json-file --log-opt max-file=4 --log-opt 
max-size=5m," -i /lib/systemd/system/docker.service +sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker --log-driver=journald," -i /lib/systemd/system/docker.service systemctl enable docker # give docker sometime to start up and create iptables rules From bb040eb5a8775d93c86ff72eb2376731e46776b0 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 2 Nov 2015 13:20:43 -0800 Subject: [PATCH 149/234] give yellowtent user access to cloudron logs --- images/initializeBaseUbuntuImage.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index b6f9e6be6..0238dd73f 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -232,6 +232,10 @@ sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.n timedatectl set-ntp 1 timedatectl set-timezone UTC +# Give user access to system logs +usermod -a -G systemd-journal ${USER} +setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal + echo "==== Install ssh ===" apt-get -y install openssh-server # https://stackoverflow.com/questions/4348166/using-with-sed on why ? must be escaped From 6a0b8e0722d5333bbf68e5ee4a5fb38049619196 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 3 Nov 2015 16:52:38 -0800 Subject: [PATCH 150/234] remove unused backup route --- src/server.js | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/server.js b/src/server.js index 7a859d6f8..e517919f1 100755 --- a/src/server.js +++ b/src/server.js @@ -75,15 +75,6 @@ function retire(req, res, next) { next(new HttpSuccess(202, {})); } -function backup(req, res, next) { - // !! 
below port has to be in sync with box/config.js internalPort - superagent.post('http://127.0.0.1:3001/api/v1/backup').end(function (error, result) { - if (error) return next(new HttpError(500, error)); - if (result.statusCode !== 202) return next(new HttpError(result.statusCode, 'trigger backup failed with ' + result.body.message)); - next(new HttpSuccess(202, {})); - }); -} - function startUpdateServer(callback) { assert.strictEqual(typeof callback, 'function'); @@ -123,7 +114,6 @@ function startProvisionServer(callback) { .use(lastMile()); router.post('/api/v1/installer/retire', retire); - router.post('/api/v1/installer/backup', backup); var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs'); var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer' : 'certs'); From 15f686fc6973bbf77f6bbf70c722d7146d9c2d74 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 10 Nov 2015 01:53:09 -0800 Subject: [PATCH 151/234] reboot automatically on panic after 5 seconds --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 0238dd73f..f54d2749c 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -100,7 +100,7 @@ iptables -I FORWARD -d 169.254.169.254 -j DROP mkdir /etc/iptables && iptables-save > /etc/iptables/rules.v4 echo "=== Enable memory accounting ==" -sed -e 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/' -i /etc/default/grub +sed -e 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub update-grub # now add the user to the docker group From 0ebe6bde3dfe3dc3dbdd22c485b655f9b56a2381 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 15:46:15 -0800 Subject: [PATCH 152/234] remove 
async and superagent from dev deps --- package.json | 2 -- 1 file changed, 2 deletions(-) diff --git a/package.json b/package.json index 8bec54bec..ad6955d84 100644 --- a/package.json +++ b/package.json @@ -25,14 +25,12 @@ "superagent": "^0.21.0" }, "devDependencies": { - "async": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", "commander": "^2.8.1", "expect.js": "^0.3.1", "istanbul": "^0.3.5", "lodash": "^3.2.0", "mocha": "^2.1.0", "nock": "^0.59.1", - "superagent": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", "supererror": "^0.7.0", "yesno": "0.0.1" }, From 398dfce698d77cbcde78c44d7d22530376a2e105 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 15:48:32 -0800 Subject: [PATCH 153/234] update packages --- package.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/package.json b/package.json index ad6955d84..d70c990da 100644 --- a/package.json +++ b/package.json @@ -13,15 +13,15 @@ "node >= 0.10.0" ], "dependencies": { - "async": "^0.9.0", + "async": "^1.5.0", "body-parser": "^1.12.0", - "connect-lastmile": "0.0.10", + "connect-lastmile": "0.0.13", "debug": "^2.1.1", "express": "^4.11.2", "json": "^9.0.3", "morgan": "^1.5.1", - "proxy-middleware": "^0.11.0", - "safetydance": "0.0.16", + "proxy-middleware": "^0.15.0", + "safetydance": "0.0.19", "superagent": "^0.21.0" }, "devDependencies": { From 44ca59ac706951d407419b081a04678b37e3b88b Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 15:49:38 -0800 Subject: [PATCH 154/234] update shrinkwrap --- npm-shrinkwrap.json | 450 +++++++++++++++++++++++++------------------- 1 file changed, 260 insertions(+), 190 deletions(-) diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json index 120fbb335..4149f26fb 100644 --- a/npm-shrinkwrap.json +++ b/npm-shrinkwrap.json @@ -3,76 +3,100 @@ "version": "0.0.1", "dependencies": { "async": { - "version": "0.9.0", - "from": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", - 
"resolved": "https://registry.npmjs.org/async/-/async-0.9.0.tgz" + "version": "1.5.0", + "from": "async@>=1.5.0 <2.0.0", + "resolved": "https://registry.npmjs.org/async/-/async-1.5.0.tgz" }, "body-parser": { - "version": "1.12.2", - "from": "https://registry.npmjs.org/body-parser/-/body-parser-1.12.2.tgz", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.12.2.tgz", + "version": "1.14.1", + "from": "body-parser@>=1.12.0 <2.0.0", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.14.1.tgz", "dependencies": { "bytes": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz" + "version": "2.1.0", + "from": "bytes@2.1.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-2.1.0.tgz" }, "content-type": { "version": "1.0.1", - "from": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz", + "from": "content-type@>=1.0.1 <1.1.0", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz" }, "depd": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + "version": "1.1.0", + "from": "depd@>=1.1.0 <1.2.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.0.tgz" + }, + "http-errors": { + "version": "1.3.1", + "from": "http-errors@>=1.3.1 <1.4.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.3.1.tgz", + "dependencies": { + "inherits": { + "version": "2.0.1", + "from": "inherits@>=2.0.1 <2.1.0", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + }, + "statuses": { + "version": "1.2.1", + "from": "statuses@>=1.0.0 <2.0.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.2.1.tgz" + } + } }, "iconv-lite": { - "version": "0.4.7", - "from": "http://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.7.tgz", - "resolved": 
"http://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.7.tgz" + "version": "0.4.12", + "from": "iconv-lite@0.4.12", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.12.tgz" }, "on-finished": { - "version": "2.2.0", - "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "version": "2.3.0", + "from": "on-finished@>=2.3.0 <2.4.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", "dependencies": { "ee-first": { - "version": "1.1.0", - "from": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", - "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + "version": "1.1.1", + "from": "ee-first@1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz" } } }, "qs": { - "version": "2.4.1", - "from": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz", - "resolved": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz" + "version": "5.1.0", + "from": "qs@5.1.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-5.1.0.tgz" }, "raw-body": { - "version": "1.3.3", - "from": "http://registry.npmjs.org/raw-body/-/raw-body-1.3.3.tgz", - "resolved": "http://registry.npmjs.org/raw-body/-/raw-body-1.3.3.tgz" + "version": "2.1.4", + "from": "raw-body@>=2.1.4 <2.2.0", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.1.4.tgz", + "dependencies": { + "unpipe": { + "version": "1.0.0", + "from": "unpipe@1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz" + } + } }, "type-is": { - "version": "1.6.1", - "from": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "version": "1.6.9", + "from": "type-is@>=1.6.9 <1.7.0", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.9.tgz", "dependencies": { "media-typer": { "version": "0.3.0", - "from": 
"http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" + "from": "media-typer@0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" }, "mime-types": { - "version": "2.0.10", - "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "version": "2.1.7", + "from": "mime-types@>=2.1.7 <2.2.0", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz", "dependencies": { "mime-db": { - "version": "1.8.0", - "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + "version": "1.19.0", + "from": "mime-db@>=1.19.0 <1.20.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz" } } } @@ -81,357 +105,403 @@ } }, "connect-lastmile": { - "version": "0.0.10", - "from": "http://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.10.tgz", - "resolved": "http://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.10.tgz" + "version": "0.0.13", + "from": "connect-lastmile@0.0.13", + "resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-0.0.13.tgz", + "dependencies": { + "debug": { + "version": "2.1.3", + "from": "debug@>=2.1.0 <2.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz", + "dependencies": { + "ms": { + "version": "0.7.0", + "from": "ms@0.7.0", + "resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz" + } + } + } + } }, "debug": { - "version": "2.1.3", - "from": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.1.3.tgz", + "version": "2.2.0", + "from": "debug@>=2.1.1 <3.0.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.2.0.tgz", "dependencies": { "ms": { - "version": "0.7.0", - "from": 
"http://registry.npmjs.org/ms/-/ms-0.7.0.tgz", - "resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz" + "version": "0.7.1", + "from": "ms@0.7.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-0.7.1.tgz" } } }, "express": { - "version": "4.12.3", - "from": "https://registry.npmjs.org/express/-/express-4.12.3.tgz", - "resolved": "https://registry.npmjs.org/express/-/express-4.12.3.tgz", + "version": "4.13.3", + "from": "express@>=4.11.2 <5.0.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.13.3.tgz", "dependencies": { "accepts": { - "version": "1.2.5", - "from": "https://registry.npmjs.org/accepts/-/accepts-1.2.5.tgz", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.2.5.tgz", + "version": "1.2.13", + "from": "accepts@>=1.2.12 <1.3.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.2.13.tgz", "dependencies": { "mime-types": { - "version": "2.0.10", - "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "version": "2.1.7", + "from": "mime-types@>=2.1.6 <2.2.0", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz", "dependencies": { "mime-db": { - "version": "1.8.0", - "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + "version": "1.19.0", + "from": "mime-db@>=1.19.0 <1.20.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz" } } }, "negotiator": { - "version": "0.5.1", - "from": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.1.tgz", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.1.tgz" + "version": "0.5.3", + "from": "negotiator@0.5.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.5.3.tgz" } } }, + "array-flatten": { + "version": "1.1.1", + "from": "array-flatten@1.1.1", + "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz" + }, "content-disposition": { "version": "0.5.0", - "from": "http://registry.npmjs.org/content-disposition/-/content-disposition-0.5.0.tgz", + "from": "content-disposition@0.5.0", "resolved": "http://registry.npmjs.org/content-disposition/-/content-disposition-0.5.0.tgz" }, "content-type": { "version": "1.0.1", - "from": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz", + "from": "content-type@>=1.0.1 <1.1.0", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.1.tgz" }, "cookie": { - "version": "0.1.2", - "from": "https://registry.npmjs.org/cookie/-/cookie-0.1.2.tgz", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.1.2.tgz" + "version": "0.1.3", + "from": "cookie@0.1.3", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.1.3.tgz" }, "cookie-signature": { "version": "1.0.6", - "from": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "from": "cookie-signature@1.0.6", "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz" }, "depd": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + "version": "1.0.1", + "from": "depd@>=1.0.1 <1.1.0", + "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.1.tgz" }, "escape-html": { - "version": "1.0.1", - "from": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.1.tgz", - "resolved": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.1.tgz" + "version": "1.0.2", + "from": "escape-html@1.0.2", + "resolved": "http://registry.npmjs.org/escape-html/-/escape-html-1.0.2.tgz" }, "etag": { - "version": "1.5.1", - "from": "http://registry.npmjs.org/etag/-/etag-1.5.1.tgz", - "resolved": "http://registry.npmjs.org/etag/-/etag-1.5.1.tgz", + "version": "1.7.0", + "from": "etag@>=1.7.0 <1.8.0", + "resolved": 
"https://registry.npmjs.org/etag/-/etag-1.7.0.tgz" + }, + "finalhandler": { + "version": "0.4.0", + "from": "finalhandler@0.4.0", + "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.4.0.tgz", "dependencies": { - "crc": { - "version": "3.2.1", - "from": "http://registry.npmjs.org/crc/-/crc-3.2.1.tgz", - "resolved": "http://registry.npmjs.org/crc/-/crc-3.2.1.tgz" + "unpipe": { + "version": "1.0.0", + "from": "unpipe@>=1.0.0 <1.1.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz" } } }, - "finalhandler": { - "version": "0.3.4", - "from": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.3.4.tgz", - "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-0.3.4.tgz" - }, "fresh": { - "version": "0.2.4", - "from": "http://registry.npmjs.org/fresh/-/fresh-0.2.4.tgz", - "resolved": "http://registry.npmjs.org/fresh/-/fresh-0.2.4.tgz" + "version": "0.3.0", + "from": "fresh@0.3.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.3.0.tgz" }, "merge-descriptors": { "version": "1.0.0", - "from": "http://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz" + "from": "merge-descriptors@1.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.0.tgz" }, "methods": { "version": "1.1.1", - "from": "https://registry.npmjs.org/methods/-/methods-1.1.1.tgz", + "from": "methods@>=1.1.1 <1.2.0", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.1.tgz" }, "on-finished": { - "version": "2.2.0", - "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "version": "2.3.0", + "from": "on-finished@>=2.3.0 <2.4.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", "dependencies": { "ee-first": { - "version": "1.1.0", - "from": 
"http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", - "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + "version": "1.1.1", + "from": "ee-first@1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz" } } }, "parseurl": { "version": "1.3.0", - "from": "http://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz", - "resolved": "http://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz" + "from": "parseurl@>=1.3.0 <1.4.0", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.0.tgz" }, "path-to-regexp": { - "version": "0.1.3", - "from": "http://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.3.tgz", - "resolved": "http://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.3.tgz" + "version": "0.1.7", + "from": "path-to-regexp@0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz" }, "proxy-addr": { - "version": "1.0.7", - "from": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.7.tgz", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.7.tgz", + "version": "1.0.8", + "from": "proxy-addr@>=1.0.8 <1.1.0", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-1.0.8.tgz", "dependencies": { "forwarded": { "version": "0.1.0", - "from": "http://registry.npmjs.org/forwarded/-/forwarded-0.1.0.tgz", + "from": "forwarded@>=0.1.0 <0.2.0", "resolved": "http://registry.npmjs.org/forwarded/-/forwarded-0.1.0.tgz" }, "ipaddr.js": { - "version": "0.1.9", - "from": "http://registry.npmjs.org/ipaddr.js/-/ipaddr.js-0.1.9.tgz", - "resolved": "http://registry.npmjs.org/ipaddr.js/-/ipaddr.js-0.1.9.tgz" + "version": "1.0.1", + "from": "ipaddr.js@1.0.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.0.1.tgz" } } }, "qs": { - "version": "2.4.1", - "from": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz", - "resolved": "https://registry.npmjs.org/qs/-/qs-2.4.1.tgz" + "version": "4.0.0", + "from": "qs@4.0.0", + "resolved": 
"https://registry.npmjs.org/qs/-/qs-4.0.0.tgz" }, "range-parser": { - "version": "1.0.2", - "from": "http://registry.npmjs.org/range-parser/-/range-parser-1.0.2.tgz", - "resolved": "http://registry.npmjs.org/range-parser/-/range-parser-1.0.2.tgz" + "version": "1.0.3", + "from": "range-parser@>=1.0.2 <1.1.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.0.3.tgz" }, "send": { - "version": "0.12.2", - "from": "https://registry.npmjs.org/send/-/send-0.12.2.tgz", - "resolved": "https://registry.npmjs.org/send/-/send-0.12.2.tgz", + "version": "0.13.0", + "from": "send@0.13.0", + "resolved": "http://registry.npmjs.org/send/-/send-0.13.0.tgz", "dependencies": { "destroy": { "version": "1.0.3", - "from": "http://registry.npmjs.org/destroy/-/destroy-1.0.3.tgz", + "from": "destroy@1.0.3", "resolved": "http://registry.npmjs.org/destroy/-/destroy-1.0.3.tgz" }, + "http-errors": { + "version": "1.3.1", + "from": "http-errors@>=1.3.1 <1.4.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.3.1.tgz", + "dependencies": { + "inherits": { + "version": "2.0.1", + "from": "inherits@>=2.0.1 <2.1.0", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + } + } + }, "mime": { "version": "1.3.4", - "from": "https://registry.npmjs.org/mime/-/mime-1.3.4.tgz", + "from": "mime@1.3.4", "resolved": "https://registry.npmjs.org/mime/-/mime-1.3.4.tgz" }, "ms": { - "version": "0.7.0", - "from": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz", - "resolved": "http://registry.npmjs.org/ms/-/ms-0.7.0.tgz" + "version": "0.7.1", + "from": "ms@0.7.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-0.7.1.tgz" + }, + "statuses": { + "version": "1.2.1", + "from": "statuses@>=1.2.1 <1.3.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.2.1.tgz" } } }, "serve-static": { - "version": "1.9.2", - "from": "https://registry.npmjs.org/serve-static/-/serve-static-1.9.2.tgz", - "resolved": 
"https://registry.npmjs.org/serve-static/-/serve-static-1.9.2.tgz" + "version": "1.10.0", + "from": "serve-static@>=1.10.0 <1.11.0", + "resolved": "http://registry.npmjs.org/serve-static/-/serve-static-1.10.0.tgz" }, "type-is": { - "version": "1.6.1", - "from": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.1.tgz", + "version": "1.6.9", + "from": "type-is@>=1.6.9 <1.7.0", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.9.tgz", "dependencies": { "media-typer": { "version": "0.3.0", - "from": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" + "from": "media-typer@0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz" }, "mime-types": { - "version": "2.0.10", - "from": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.10.tgz", + "version": "2.1.7", + "from": "mime-types@>=2.1.6 <2.2.0", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.7.tgz", "dependencies": { "mime-db": { - "version": "1.8.0", - "from": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.8.0.tgz" + "version": "1.19.0", + "from": "mime-db@>=1.19.0 <1.20.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.19.0.tgz" } } } } }, - "vary": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/vary/-/vary-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/vary/-/vary-1.0.0.tgz" - }, "utils-merge": { "version": "1.0.0", - "from": "http://registry.npmjs.org/utils-merge/-/utils-merge-1.0.0.tgz", + "from": "utils-merge@1.0.0", "resolved": "http://registry.npmjs.org/utils-merge/-/utils-merge-1.0.0.tgz" + }, + "vary": { + "version": "1.0.1", + "from": "vary@>=1.0.1 <1.1.0", + "resolved": 
"https://registry.npmjs.org/vary/-/vary-1.0.1.tgz" } } }, "json": { "version": "9.0.3", - "from": "https://registry.npmjs.org/json/-/json-9.0.3.tgz", + "from": "json@>=9.0.3 <10.0.0", "resolved": "https://registry.npmjs.org/json/-/json-9.0.3.tgz" }, "morgan": { - "version": "1.5.2", - "from": "https://registry.npmjs.org/morgan/-/morgan-1.5.2.tgz", - "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.5.2.tgz", + "version": "1.6.1", + "from": "morgan@>=1.5.1 <2.0.0", + "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.6.1.tgz", "dependencies": { "basic-auth": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.0.0.tgz" + "version": "1.0.3", + "from": "basic-auth@>=1.0.3 <1.1.0", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-1.0.3.tgz" }, "depd": { - "version": "1.0.0", - "from": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz", - "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.0.tgz" + "version": "1.0.1", + "from": "depd@>=1.0.1 <1.1.0", + "resolved": "http://registry.npmjs.org/depd/-/depd-1.0.1.tgz" }, "on-finished": { - "version": "2.2.0", - "from": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.2.0.tgz", + "version": "2.3.0", + "from": "on-finished@>=2.3.0 <2.4.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", "dependencies": { "ee-first": { - "version": "1.1.0", - "from": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz", - "resolved": "http://registry.npmjs.org/ee-first/-/ee-first-1.1.0.tgz" + "version": "1.1.1", + "from": "ee-first@1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz" } } + }, + "on-headers": { + "version": "1.0.1", + "from": "on-headers@>=1.0.0 <1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz" } } }, 
"proxy-middleware": { - "version": "0.11.0", - "from": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.11.0.tgz", - "resolved": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.11.0.tgz" + "version": "0.15.0", + "from": "proxy-middleware@>=0.15.0 <0.16.0", + "resolved": "https://registry.npmjs.org/proxy-middleware/-/proxy-middleware-0.15.0.tgz" }, "safetydance": { - "version": "0.0.16", - "from": "http://registry.npmjs.org/safetydance/-/safetydance-0.0.16.tgz", - "resolved": "http://registry.npmjs.org/safetydance/-/safetydance-0.0.16.tgz" + "version": "0.0.19", + "from": "safetydance@0.0.19", + "resolved": "https://registry.npmjs.org/safetydance/-/safetydance-0.0.19.tgz" }, "superagent": { "version": "0.21.0", - "from": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", + "from": "superagent@>=0.21.0 <0.22.0", "resolved": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", "dependencies": { "qs": { "version": "1.2.0", - "from": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz", + "from": "qs@1.2.0", "resolved": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz" }, "formidable": { "version": "1.0.14", - "from": "http://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz", - "resolved": "http://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz" + "from": "formidable@1.0.14", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz" }, "mime": { "version": "1.2.11", - "from": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz", + "from": "mime@1.2.11", "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz" }, "component-emitter": { "version": "1.1.2", - "from": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz", + "from": "component-emitter@1.1.2", "resolved": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz" }, "methods": { "version": "1.0.1", - "from": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz", + "from": 
"methods@1.0.1", "resolved": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz" }, "cookiejar": { "version": "2.0.1", - "from": "http://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz", - "resolved": "http://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz" + "from": "cookiejar@2.0.1", + "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz" }, "reduce-component": { "version": "1.0.1", - "from": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz", + "from": "reduce-component@1.0.1", "resolved": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz" }, "extend": { "version": "1.2.1", - "from": "https://registry.npmjs.org/extend/-/extend-1.2.1.tgz", + "from": "extend@>=1.2.1 <1.3.0", "resolved": "https://registry.npmjs.org/extend/-/extend-1.2.1.tgz" }, "form-data": { "version": "0.1.3", - "from": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz", + "from": "form-data@0.1.3", "resolved": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz", "dependencies": { "combined-stream": { "version": "0.0.7", - "from": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", + "from": "combined-stream@>=0.0.4 <0.1.0", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", "dependencies": { "delayed-stream": { "version": "0.0.5", - "from": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz", + "from": "delayed-stream@0.0.5", "resolved": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz" } } + }, + "async": { + "version": "0.9.2", + "from": "async@>=0.9.0 <0.10.0", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz" } } }, "readable-stream": { "version": "1.0.27-1", - "from": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.27-1.tgz", + "from": "readable-stream@1.0.27-1", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.27-1.tgz", "dependencies": { 
"core-util-is": { "version": "1.0.1", - "from": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz", - "resolved": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" + "from": "core-util-is@>=1.0.0 <1.1.0", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" }, "isarray": { "version": "0.0.1", - "from": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "resolved": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" + "from": "isarray@0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" }, "string_decoder": { "version": "0.10.31", - "from": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "resolved": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" + "from": "string_decoder@>=0.10.0 <0.11.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" }, "inherits": { "version": "2.0.1", - "from": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "resolved": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + "from": "inherits@>=2.0.1 <2.1.0", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" } } } From a242881101d94f3ef0042293e8180b7049d22e8d Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 15:56:14 -0800 Subject: [PATCH 155/234] change engine requirements --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d70c990da..7b91e29cc 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,7 @@ "type": "git" }, "engines": [ - "node >= 0.10.0" + "node >= 4.2.0" ], "dependencies": { "async": "^1.5.0", From 7a1cdd62a492c7675a464b6045ff21573b06ecb1 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 16:02:42 -0800 Subject: [PATCH 156/234] install node 4.2.2 --- images/initializeBaseUbuntuImage.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) 
diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index f54d2749c..ffdce5c02 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -157,8 +157,9 @@ echo "${INSTALLER_REVISION}" > "${INSTALLER_SOURCE_DIR}/REVISION" echo "==== Install nodejs ====" apt-get install -y curl -curl -sL https://deb.nodesource.com/setup_0.12 | bash - -apt-get install -y nodejs +mkdir -p /usr/local/node-4.2.2 +curl -sL https://nodejs.org/dist/v4.2.2/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 +ln -s /usr/local/bin/node-4.2.2/bin/node /usr/bin/node apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" From 9ae49e71699a0b2debc167424702bc91487baeeb Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 11 Nov 2015 22:04:58 -0800 Subject: [PATCH 157/234] link npm --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index ffdce5c02..818fec7db 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -160,6 +160,7 @@ apt-get install -y curl mkdir -p /usr/local/node-4.2.2 curl -sL https://nodejs.org/dist/v4.2.2/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 ln -s /usr/local/bin/node-4.2.2/bin/node /usr/bin/node +ln -s /usr/local/bin/node-4.2.2/bin/npm /usr/bin/npm apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" From b9a3c508c99f6d30849d187c954b17df39dfc7c1 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 12 Nov 2015 06:58:01 -0800 Subject: [PATCH 158/234] Fix target path --- images/initializeBaseUbuntuImage.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh 
b/images/initializeBaseUbuntuImage.sh index 818fec7db..2201f9bc5 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -159,8 +159,8 @@ echo "==== Install nodejs ====" apt-get install -y curl mkdir -p /usr/local/node-4.2.2 curl -sL https://nodejs.org/dist/v4.2.2/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 -ln -s /usr/local/bin/node-4.2.2/bin/node /usr/bin/node -ln -s /usr/local/bin/node-4.2.2/bin/npm /usr/bin/npm +ln -s /usr/local/node-4.2.2/bin/node /usr/bin/node +ln -s /usr/local/node-4.2.2/bin/npm /usr/bin/npm apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" From aea39a83b626bea7dc3f34ca0bd6051e64332438 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 12:11:57 -0800 Subject: [PATCH 159/234] change yellowtent key to caas --- images/createDigitalOceanImage.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 7abbcc09e..4239b306e 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -83,15 +83,15 @@ if [[ -z "${box_name}" ]]; then box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes # create a new droplet if no name given - yellowtent_ssh_key_id=$(get_ssh_key_id "yellowtent") - if [[ -z "${yellowtent_ssh_key_id}" ]]; then - echo "Could not query yellowtent ssh key" + caas_ssh_key_id=$(get_ssh_key_id "caas") + if [[ -z "${caas_ssh_key_id}" ]]; then + echo "Could not query caas ssh key" exit 1 fi - echo "Detected yellowtent ssh key id: ${yellowtent_ssh_key_id}" # 124654 for yellowtent key + echo "Detected yellowtent ssh key id: ${caas_ssh_key_id}" echo "Creating Droplet with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" - droplet_id=$(create_droplet ${yellowtent_ssh_key_id} ${box_name} ${box_size} 
${image_regions[0]}) + droplet_id=$(create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) if [[ -z "${droplet_id}" ]]; then echo "Failed to create droplet" exit 1 From 4e872865a3f15a891bac62895238f791b0efdec6 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 12:15:15 -0800 Subject: [PATCH 160/234] use different keys for different env --- images/createDigitalOceanImage.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 4239b306e..dffd68cb3 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -9,13 +9,6 @@ assertNotEmpty() { readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly INSTALLER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)" readonly JSON="${INSTALLER_DIR}/node_modules/.bin/json" -readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent" - -readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" - -readonly ssh202="ssh -p 202 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly ssh22="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" installer_revision=$(git rev-parse HEAD) box_size="512mb" @@ -63,8 +56,15 @@ else fi source "${SCRIPT_DIR}/digitalOceanFunctions.sh" +readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}" +readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" + +readonly ssh202="ssh -p 202 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly ssh22="ssh -o 
UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" + if [[ ! -f "${ssh_keys}" ]]; then - echo "yellowtent ssh key is missing" + echo "caas ssh key is missing (pick it up from secrets repo)" exit 1 fi From 5a0c80611eb74f447cd1b74cf4f182c7f55f128f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 12:15:44 -0800 Subject: [PATCH 161/234] better error message --- images/createDigitalOceanImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index dffd68cb3..850b24610 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -64,7 +64,7 @@ readonly ssh202="ssh -p 202 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChec readonly ssh22="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" if [[ ! -f "${ssh_keys}" ]]; then - echo "caas ssh key is missing (pick it up from secrets repo)" + echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)" exit 1 fi From db97d7e836fc51dce0ecbd82a3f362e6a97ff5c7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 13:04:09 -0800 Subject: [PATCH 162/234] Fix options usage --- images/createDigitalOceanImage.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 850b24610..51ffb7959 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -57,11 +57,11 @@ fi source "${SCRIPT_DIR}/digitalOceanFunctions.sh" readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}" -readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp202="scp -P 202 -o IdentitiesOnly=yes' -o ConnectTimeout=10 -o 
UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp22="scp -o IdentitiesOnly=yes -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly ssh202="ssh -p 202 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly ssh22="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly ssh202="ssh -p 202 -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly ssh22="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" if [[ ! -f "${ssh_keys}" ]]; then echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)" From ac14b08af4cd31ae0279b81346b62246b26b473e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 12 Nov 2015 12:17:14 -0800 Subject: [PATCH 163/234] we have to use 4.1.1 --- images/initializeBaseUbuntuImage.sh | 9 +++++---- package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 2201f9bc5..b4f41f7df 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -157,10 +157,11 @@ echo "${INSTALLER_REVISION}" > "${INSTALLER_SOURCE_DIR}/REVISION" echo "==== Install nodejs ====" apt-get install -y curl -mkdir -p /usr/local/node-4.2.2 -curl -sL https://nodejs.org/dist/v4.2.2/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 -ln -s /usr/local/node-4.2.2/bin/node /usr/bin/node -ln -s /usr/local/node-4.2.2/bin/npm /usr/bin/npm +# Cannot use anything above 4.1.1 - https://github.com/nodejs/node/issues/3803 +mkdir -p /usr/local/node-4.1.1 +curl -sL https://nodejs.org/dist/v4.1.1/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 +ln -s /usr/local/node-4.1.1/bin/node /usr/bin/node +ln -s 
/usr/local/node-4.1.1/bin/npm /usr/bin/npm apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" diff --git a/package.json b/package.json index 7b91e29cc..4d3b042e9 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,7 @@ "type": "git" }, "engines": [ - "node >= 4.2.0" + "node >=4.0.0 <=4.1.1" ], "dependencies": { "async": "^1.5.0", From 00ee89a6934e505526e987634611c0cc15811b37 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Thu, 12 Nov 2015 12:28:36 -0800 Subject: [PATCH 164/234] fix paths --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index b4f41f7df..ca199fa7b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -159,7 +159,7 @@ echo "==== Install nodejs ====" apt-get install -y curl # Cannot use anything above 4.1.1 - https://github.com/nodejs/node/issues/3803 mkdir -p /usr/local/node-4.1.1 -curl -sL https://nodejs.org/dist/v4.1.1/node-v4.2.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.2.2 +curl -sL https://nodejs.org/dist/v4.1.1/node-v4.1.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.1.1 ln -s /usr/local/node-4.1.1/bin/node /usr/bin/node ln -s /usr/local/node-4.1.1/bin/npm /usr/bin/npm apt-get install -y python # Install python which is required for npm rebuild From 47b6819ec8c41d3570d72d9f60e2b1dddec0b4b5 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 14:48:05 -0800 Subject: [PATCH 165/234] scp does not require this option --- images/createDigitalOceanImage.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 51ffb7959..ba6d0c0be 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -57,8 +57,8 @@ fi 
source "${SCRIPT_DIR}/digitalOceanFunctions.sh" readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}" -readonly scp202="scp -P 202 -o IdentitiesOnly=yes' -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" -readonly scp22="scp -o IdentitiesOnly=yes -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" +readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" readonly ssh202="ssh -p 202 -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" readonly ssh22="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" From 87d109727abe0d0f2cba9682c3887309f5269f83 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 15:08:26 -0800 Subject: [PATCH 166/234] fix path to secrets --- images/createDigitalOceanImage.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index ba6d0c0be..f512bde9e 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -147,8 +147,10 @@ if ! 
$ssh22 "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh $ fi echo "Copy over certs" -$scp202 -r "${INSTALLER_DIR}/../keys/installer/" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" -$scp202 "${INSTALLER_DIR}/../keys/installer_ca/ca.crt" "root@${droplet_ip}:/home/yellowtent/installer/src/certs/" +cd "${SCRIPT_DIR}/../../secrets" +blackbox_cat installer/server.crt.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/server.crt" +blackbox_cat installer/server.key.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/server.key" +blackbox_cat installer_ca/ca.crt.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/ca.crt" echo "Shutting down droplet with id : ${droplet_id}" $ssh202 "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail From 3cd0cc01c42f76a194f99252506bc5366b36f786 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 15:20:08 -0800 Subject: [PATCH 167/234] Add test certs This is simply a self-signed cert --- src/server.js | 4 ++-- src/test/certs/ca.crt | 24 ++++++++++++++++++++++++ src/test/certs/server.crt | 20 ++++++++++++++++++++ src/test/certs/server.key | 27 +++++++++++++++++++++++++++ 4 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 src/test/certs/ca.crt create mode 100644 src/test/certs/server.crt create mode 100644 src/test/certs/server.key diff --git a/src/server.js b/src/server.js index e517919f1..c098b2171 100755 --- a/src/server.js +++ b/src/server.js @@ -115,8 +115,8 @@ function startProvisionServer(callback) { router.post('/api/v1/installer/retire', retire); - var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? '../../keys/installer_ca' : 'certs'); - var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? 
'../../keys/installer' : 'certs'); + var caPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? 'test/certs' : 'certs'); + var certPath = path.join(__dirname, process.env.NODE_ENV === 'test' ? 'test/certs' : 'certs'); var options = { key: fs.readFileSync(path.join(certPath, 'server.key')), diff --git a/src/test/certs/ca.crt b/src/test/certs/ca.crt new file mode 100644 index 000000000..e053e9bef --- /dev/null +++ b/src/test/certs/ca.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIJAMPL81PAySGAMA0GCSqGSIb3DQEBBQUAMFoxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0MxFTATBgNVBAoTDENsb3Vk +cm9uIEluYzEaMBgGA1UEAxMRSW5zdGFsbCBTZXJ2ZXIgQ0EwHhcNMTUwMTE2MDEy +NDM2WhcNMTYwMTE2MDEyNDM2WjBaMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex +CzAJBgNVBAcTAlNDMRUwEwYDVQQKEwxDbG91ZHJvbiBJbmMxGjAYBgNVBAMTEUlu +c3RhbGwgU2VydmVyIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +31TkOEC3JXtieHiZgM5qWw771rV2JEDKs1C68+n/OmKrp3zAQV08A+w/KVurn1P9 +gZlYF+CBRVZDV8lYbWzc6PgMPWEDHHV72FS5Kq6ZyikB+r5OQJ8qU61y840h6ZCD +MEYr6N9qXm9wSApJBQ/key/pg7+95B2CFYRrg5NVstIYqpJ1lyxCMFTrjYAmteOB +Bi/4GPApu9Tj0ifTMbZFGTPtWm/yhCZ6Anm6w+ok9tDMpPC6kRgUJ3B4HY75D9dV +aWSls9jdZw4JU1jIFlAdUjhGEEmHWOzAD8vBjvuBqcf9NQwvieWG5tDYfZ6DYRC2 +/aG1C5UWhFLDv2/F+56k3wIDAQABo4G/MIG8MB0GA1UdDgQWBBQ088hd2sIIqVtw +xJeAkCORdclFRjCBjAYDVR0jBIGEMIGBgBQ088hd2sIIqVtwxJeAkCORdclFRqFe +pFwwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTQzEVMBMG +A1UEChMMQ2xvdWRyb24gSW5jMRowGAYDVQQDExFJbnN0YWxsIFNlcnZlciBDQYIJ +AMPL81PAySGAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJcW+Wmz +/o0JBC2WsMjUjxVrzOiu9bdKQ1yn83Zcv74zEfmWfJotVOK1oKsTyOZfTvvWrpLc +GXXhh4oXWsNnFII3uJyZIY3v/DoE0pa7TCZhLYFbL2kEaC5rTwe/+VScHy5ROOiu ++gnzOU3MyrcMTT0v4qcT0NlkIptRdvIYNpqfXO6vG9sMp4C/NwWhl/IfHkIAv0eH +l3HTr8wxgldCjxbnJgYkyUcWAmLi2YEXKCEPWmsfqp3Z+Ng1M+A9OKjJLHWowl9X +4arvn6WaUbZjRxxjvK199If1R6KWwD6YQ9cKH4Ex4/hhIqg5I3MQFu+pOq/b0XH/ +9I10o6FVU7vcFkQ= +-----END CERTIFICATE----- diff --git a/src/test/certs/server.crt b/src/test/certs/server.crt new 
file mode 100644 index 000000000..7480dde1a --- /dev/null +++ b/src/test/certs/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMDCCAhgCCQCDr1HQJBr1izANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExCzAJBgNVBAcMAlNDMREwDwYDVQQKDAhDbG91ZHJvbjEe +MBwGA1UEAwwVaW5zdGFsbGVyLmNsb3Vkcm9uLmlvMB4XDTE1MTExNjIzMTcwMloX +DTE2MTExNTIzMTcwMlowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQswCQYD +VQQHDAJTQzERMA8GA1UECgwIQ2xvdWRyb24xHjAcBgNVBAMMFWluc3RhbGxlci5j +bG91ZHJvbi5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK0suQX7 +hKBhYsSH0msnEPVbRDIotYbtVDav/v7Sb/fRU7qVoL31tj2iZRDJRJ27uRM3J4ye +6hgJAAwQGtfXrcVZY3SOAlGXsFZF0wgBCw0pGtgF3HA1BcwbCwAd06J6w3lKActA +DMEUio/jRXpYELUU2Nzopq0MsMyyBSBkNC18i0HUB8vkF8yQvb1OpbcxERbpf3D5 +zjeFf5kIE/k8lwBz1vMF0uAA2GfcXxs3dyDaxVteWeevVYZzAoY9EcUyBWX7OQnx +aUygl3OywN+xOJKXKCQpckzDvr9Vp1sKItoMMy5y81SyNhZIMBYGGG+oNp/wSgQf +Cht+LupI+bXoYrMCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAgPHZx52qYuEUdzVO +t/+VXO7dxJkONYU8sjTYIfJme8ZZd7beZBMUni5s2gvv6i5HFyJ2Ol88sv8hAaI/ +6Vmbszml+5tLyPK8Gygk62l6OcKDwU/yazTxxCApulNy1SV34kzruXUMZ28ybcqA +XJywMMx4RDmSIBXPdDCeaOgYwI7Wk56obJ8sa2+Z6100GNoX+qBSOsWMMJW+ohnp +eQWHkTOJzU4hIMfZCbW0cF5Xn/35xEh0xxaH7XWglJLM9neBPba+Ydz7567mN9co +vgv2dE5ZOKSjG63CtUvv819dvbWVKq8jiMCqPGRcr1iSeqbC02tnx0W762980uSx +QfOgAw== +-----END CERTIFICATE----- diff --git a/src/test/certs/server.key b/src/test/certs/server.key new file mode 100644 index 000000000..0a732c4d3 --- /dev/null +++ b/src/test/certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEArSy5BfuEoGFixIfSaycQ9VtEMii1hu1UNq/+/tJv99FTupWg +vfW2PaJlEMlEnbu5EzcnjJ7qGAkADBAa19etxVljdI4CUZewVkXTCAELDSka2AXc +cDUFzBsLAB3TonrDeUoBy0AMwRSKj+NFelgQtRTY3OimrQywzLIFIGQ0LXyLQdQH +y+QXzJC9vU6ltzERFul/cPnON4V/mQgT+TyXAHPW8wXS4ADYZ9xfGzd3INrFW15Z +569VhnMChj0RxTIFZfs5CfFpTKCXc7LA37E4kpcoJClyTMO+v1WnWwoi2gwzLnLz +VLI2FkgwFgYYb6g2n/BKBB8KG34u6kj5tehiswIDAQABAoIBAGNAQ5bbLYsh5ZKP +6ZhCHqUQtsgsrsVzFhX1zqbLgyK8VUmV4jedMOKoRVZWlD32zj7mGIOuvKoj1mQT 
+gt78HPsDnU266jdLQeRgRm/K8UOMsHbo/QtOSFFPmoFpltcDly7XrKmJvwWWOUf4 +UOSqvoCaPyR1Lrn1kQrwaKHE7Ga4jfyOrIq9JI7y/ih+Y7D8xcMnyLAsjyVkSAtr ++XrGNHcx3yPuBmjaOglzeb6Ksdpt4ETElrvH3ByT5EV2zUVr9Txv+m8xSVBZfea9 +aE7lWSQoOUz+e6RhIX3Df/QfR6KkDblAwEF9Se98DWcz46Y34oc2E0lSoJYpoPxP +vbRlfDkCgYEA3nAc8kDRkbQObSfnVjpijBSP5hfr3jX+XTbxK7Y3aTMViY+87iWK +bLNuX+2JRCmRjk0wy2YXnJQV3sU/EO5gLhOz9060MIHgFISq4KRgPorN/EFWryOe +mDzhPIuhZLMetv0ajS3Z5IxIAs+FLu7Yx9em80q540UA3kXsFWe2lpUCgYEAx03E +kk5zLirVFtoyP/yAES+KVppqBweCUA5vVxB8H26oIhi8G8kT4b77x6wXxQzdsA4H +a4ou3ZBZVK41PREgG1MWgzpbwk49T1FX6TLtvdhr/9QhYC+RIynynA/pA36LSKT5 +pvWegYB4+9jaPrQ5L1zcrLF2XlTsgpuC43kXKicCgYA0dXxeJatHEY/VbnPAgkR7 +hN3rBfk6jsFOeoamKHMo/EM4Dg4gm/npaOe+9+ZHjQYm6U14qrsm0kXWI+6br5w/ +QaZPzN/yEK8oJ6GlGR8ZoOKzezVWWLAudy0neka12QiFX2vDn+yjWfIht49RYkL9 +3n4hIp50WvG5egQTiEIngQKBgCn9yJzKypm/jIX0EwJIQPNeANeeURiKDHqxj+PY +JU66EdKdQ4TXKMk3Y/T93UQ3Ib4mNooB4z3rW+brjWwAX7NiHiwn741QzroXeV44 +zL5jCt4r45xQaVPvUp5u+7kwwEfd+nui5HKEjvkBB3qOnj3MYvI/saDOY8Zg3YLv +0GGhAoGANBwFcDgwP9KDt0NxKXhe3rlSUyfGSSUF89hZPrLDCiaGFURD/w4j3EGr +Ui9Rcwm2ymqlFzTO4JYKy1/pRCWA7GDfslICJPOPG3Wytsjog0WymQuMjYC2tL/+ +RwD0qG0/aBGE4PbigPRoJ/7BGZLKtdy99P0wyFC3o6OBoAl3Zqo= +-----END RSA PRIVATE KEY----- From c7acdbf20dd87b34b3fa865d48685e02f6c1d7b8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 15:21:42 -0800 Subject: [PATCH 168/234] add empty certs dir --- src/certs/.gitignore | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/certs/.gitignore diff --git a/src/certs/.gitignore b/src/certs/.gitignore new file mode 100644 index 000000000..e69de29bb From c286b491d65a18ee89857263dd67f027df192029 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 16 Nov 2015 16:43:53 -0800 Subject: [PATCH 169/234] Fix debug output --- images/createDigitalOceanImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index f512bde9e..9f4c0e982 100755 --- 
a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -88,7 +88,7 @@ if [[ -z "${box_name}" ]]; then echo "Could not query caas ssh key" exit 1 fi - echo "Detected yellowtent ssh key id: ${caas_ssh_key_id}" + echo "Detected caas ssh key id: ${caas_ssh_key_id}" echo "Creating Droplet with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" droplet_id=$(create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) From 1e19f68cb5e75d2a4e2276bfe611020e0d9f1cd2 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 18 Nov 2015 11:52:41 -0800 Subject: [PATCH 170/234] Install docker binaries instead of apt The apt binaries lxc-* are obsolete and replaced with 'docker-engine' packages. The new repos however do not allow pinning to a specific version. so brain dead. https://docs.docker.com/engine/installation/binaries/#get-the-linux-binary --- images/initializeBaseUbuntuImage.sh | 54 +++++++++++++++++++---------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index ca199fa7b..f8cf8d3da 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -24,6 +24,7 @@ export DEBIAN_FRONTEND=noninteractive echo "=== Upgrade ===" apt-get update apt-get upgrade -y +apt-get install -y curl # Setup firewall before everything. 
Atleast docker 1.5 creates it's own chain and the -X below will remove it # Do NOT use iptables-persistent because it's startup ordering conflicts with docker @@ -66,30 +67,50 @@ echo "==== Install btrfs tools ===" apt-get -y install btrfs-tools echo "==== Install docker ====" -# see http://idolstarastronomer.com/painless-docker.html -echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list -apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 -apt-get update -apt-get -y install lxc-docker-1.7.0 -ln -sf /usr/bin/docker.io /usr/local/bin/docker +# install docker from binary to pin it to a specific version. the current debian repo does not allow pinning +curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 > /usr/bin/docker +chmod +x /usr/bin/docker +groupadd docker +cat > /etc/systemd/system/docker.socket < /etc/systemd/system/docker.service <> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" mkdir -p "${USER_DATA_DIR}/docker" -sed -e "s,ExecStart=.*,ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g ${USER_DATA_DIR}/docker --log-driver=journald," -i /lib/systemd/system/docker.service -systemctl enable docker # give docker sometime to start up and create iptables rules +systemctl enable docker systemctl start docker sleep 10 @@ -156,7 +177,6 @@ tar xvf /root/installer.tar -C "${INSTALLER_SOURCE_DIR}" && rm /root/installer.t echo "${INSTALLER_REVISION}" > "${INSTALLER_SOURCE_DIR}/REVISION" echo "==== Install nodejs ====" -apt-get install -y curl # Cannot use anything above 4.1.1 - https://github.com/nodejs/node/issues/3803 mkdir -p /usr/local/node-4.1.1 curl -sL https://nodejs.org/dist/v4.1.1/node-v4.1.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.1.1 @@ -200,7 +220,6 @@ RemainAfterExit=yes [Install] WantedBy=multi-user.target EOF - systemctl enable iptables-restore # Allocate swap files @@ -250,4 +269,3 @@ sed -e 's/^#\?Port 
.*/Port 202/g' \ # required so we can connect to this machine since port 22 is blocked by iptables by now systemctl reload sshd - From 331b4d85247bb5765b055b44b0e5f694dbe0ff4c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Wed, 18 Nov 2015 18:28:28 -0800 Subject: [PATCH 171/234] use docker 1.9.0 --- images/initializeBaseUbuntuImage.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index f8cf8d3da..638e6d9fa 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -26,7 +26,7 @@ apt-get update apt-get upgrade -y apt-get install -y curl -# Setup firewall before everything. Atleast docker 1.5 creates it's own chain and the -X below will remove it +# Setup firewall before everything. docker creates it's own chain and the -X below will remove it # Do NOT use iptables-persistent because it's startup ordering conflicts with docker echo "=== Setting up firewall ===" # clear tables and set default policy @@ -68,7 +68,7 @@ apt-get -y install btrfs-tools echo "==== Install docker ====" # install docker from binary to pin it to a specific version. 
the current debian repo does not allow pinning -curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 > /usr/bin/docker +curl https://get.docker.com/builds/Linux/x86_64/docker-1.9.0 > /usr/bin/docker chmod +x /usr/bin/docker groupadd docker cat > /etc/systemd/system/docker.socket < Date: Mon, 23 Nov 2015 08:32:54 -0800 Subject: [PATCH 172/234] s/droplet/server --- images/createDigitalOceanImage.sh | 74 +++++++++++++++---------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index 9f4c0e982..f3424a1dc 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -14,9 +14,9 @@ installer_revision=$(git rev-parse HEAD) box_size="512mb" image_regions=(sfo1 ams3) box_name="" -droplet_id="" -droplet_ip="" -destroy_droplet="yes" +server_id="" +server_ip="" +destroy_server="yes" deploy_env="dev" # Only GNU getopt supports long options. OS X comes bundled with the BSD getopt @@ -33,8 +33,8 @@ while true; do --revision) installer_revision="$2"; shift 2;; --regions) image_regions=("$2"); shift 2;; # parse as whitespace separated array --size) box_size="$2"; shift 2;; - --box) box_name="$2"; destroy_droplet="no"; shift 2;; - --no-destroy) destroy_droplet="no"; shift 2;; + --box) box_name="$2"; destroy_server="no"; shift 2;; + --no-destroy) destroy_server="no"; shift 2;; --) break;; *) echo "Unknown option $1"; exit 1;; esac @@ -82,7 +82,7 @@ if [[ -z "${box_name}" ]]; then # if you change this, change the regexp is appstore/janitor.js box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes - # create a new droplet if no name given + # create a new server if no name given caas_ssh_key_id=$(get_ssh_key_id "caas") if [[ -z "${caas_ssh_key_id}" ]]; then echo "Could not query caas ssh key" @@ -90,34 +90,34 @@ if [[ -z "${box_name}" ]]; then fi echo "Detected caas ssh key id: ${caas_ssh_key_id}" - echo "Creating Droplet with name 
[${box_name}] at [${image_regions[0]}] with size [${box_size}]" - droplet_id=$(create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) - if [[ -z "${droplet_id}" ]]; then - echo "Failed to create droplet" + echo "Creating Server with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" + server_id=$(create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) + if [[ -z "${server_id}" ]]; then + echo "Failed to create server" exit 1 fi - echo "Created droplet with id: ${droplet_id}" + echo "Created server with id: ${server_id}" # If we run scripts overenthusiastically without the wait, setup script randomly fails - echo -n "Waiting 120 seconds for droplet creation" + echo -n "Waiting 120 seconds for server creation" for i in $(seq 1 24); do echo -n "." sleep 5 done echo "" else - droplet_id=$(get_droplet_id "${box_name}") - echo "Reusing droplet with id: ${droplet_id}" + server_id=$(get_droplet_id "${box_name}") + echo "Reusing server with id: ${server_id}" - power_on_droplet "${droplet_id}" + power_on_droplet "${server_id}" fi # Query DO until we get an IP while true; do - echo "Trying to get the droplet IP" - droplet_ip=$(get_droplet_ip "${droplet_id}") - if [[ "${droplet_ip}" != "" ]]; then - echo "Droplet IP : [${droplet_ip}]" + echo "Trying to get the server IP" + server_ip=$(get_droplet_ip "${server_id}") + if [[ "${server_ip}" != "" ]]; then + echo "Server IP : [${server_ip}]" break fi echo "Timedout, trying again in 10 seconds" @@ -125,8 +125,8 @@ while true; do done while true; do - echo "Trying to copy init script to droplet" - if $scp22 "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${droplet_ip}:.; then + echo "Trying to copy init script to server" + if $scp22 "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${server_ip}:.; then break fi echo "Timedout, trying again in 30 seconds" @@ -134,46 +134,46 @@ while true; do done echo "Copying INFRA_VERSION" -$scp22 
"${SCRIPT_DIR}/../../box/setup/INFRA_VERSION" root@${droplet_ip}:. +$scp22 "${SCRIPT_DIR}/../../box/setup/INFRA_VERSION" root@${server_ip}:. echo "Copying installer source" cd "${INSTALLER_DIR}" -git archive --format=tar HEAD | $ssh22 "root@${droplet_ip}" "cat - > /root/installer.tar" +git archive --format=tar HEAD | $ssh22 "root@${server_ip}" "cat - > /root/installer.tar" echo "Executing init script" -if ! $ssh22 "root@${droplet_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then +if ! $ssh22 "root@${server_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh ${installer_revision}"; then echo "Init script failed" exit 1 fi echo "Copy over certs" cd "${SCRIPT_DIR}/../../secrets" -blackbox_cat installer/server.crt.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/server.crt" -blackbox_cat installer/server.key.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/server.key" -blackbox_cat installer_ca/ca.crt.gpg | $ssh202 "root@${droplet_ip}" "cat - > /home/yellowtent/installer/src/certs/ca.crt" +blackbox_cat installer/server.crt.gpg | $ssh202 "root@${server_ip}" "cat - > /home/yellowtent/installer/src/certs/server.crt" +blackbox_cat installer/server.key.gpg | $ssh202 "root@${server_ip}" "cat - > /home/yellowtent/installer/src/certs/server.key" +blackbox_cat installer_ca/ca.crt.gpg | $ssh202 "root@${server_ip}" "cat - > /home/yellowtent/installer/src/certs/ca.crt" -echo "Shutting down droplet with id : ${droplet_id}" -$ssh202 "root@${droplet_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail +echo "Shutting down server with id : ${server_id}" +$ssh202 "root@${server_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail # wait 10 secs for actual shutdown -echo "Waiting for 10 seconds for droplet to shutdown" +echo "Waiting for 10 seconds for server to shutdown" 
sleep 30 -echo "Powering off droplet" -power_off_droplet "${droplet_id}" +echo "Powering off server" +power_off_droplet "${server_id}" snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" echo "Snapshotting as ${snapshot_name}" -snapshot_droplet "${droplet_id}" "${snapshot_name}" +snapshot_droplet "${server_id}" "${snapshot_name}" image_id=$(get_image_id "${snapshot_name}") echo "Image id is ${image_id}" -if [[ "${destroy_droplet}" == "yes" ]]; then - echo "Destroying droplet" - destroy_droplet "${droplet_id}" +if [[ "${destroy_server}" == "yes" ]]; then + echo "Destroying server" + destroy_droplet "${server_id}" else - echo "Skipping droplet destroy" + echo "Skipping server destroy" fi echo "Transferring image to other regions" From bf4601470b54b5f16afa637370a387423dfd039f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 08:33:57 -0800 Subject: [PATCH 173/234] remove functions not part of vps api --- images/digitalOceanFunctions.sh | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/images/digitalOceanFunctions.sh b/images/digitalOceanFunctions.sh index cee5fa8e2..63fb18a9e 100644 --- a/images/digitalOceanFunctions.sh +++ b/images/digitalOceanFunctions.sh @@ -142,25 +142,6 @@ function get_image_id() { fi } -function get_image_id_by_revision() { - local revision="$1" - local image_id="" - - image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \ - | $JSON images \ - | $JSON -c "this.name.indexOf(\"box-${revision}\") === 0" 0.id) - - if [[ -n "${image_id}" ]]; then - echo "${image_id}" - fi -} - -function get_image_name() { - local image_id="$1" - $CURL "https://api.digitalocean.com/v2/images/${image_id}?per_page=100" \ - | $JSON image.name -} - function wait_for_image_event() { local image_id="$1" local event_id="$2" From 5362102be63bf2123f78c0423265fb7b08321d14 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 08:38:56 -0800 Subject: [PATCH 174/234] add --provider --- 
images/createDigitalOceanImage.sh | 33 +++++++++++++++++++------------ 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/images/createDigitalOceanImage.sh b/images/createDigitalOceanImage.sh index f3424a1dc..b99f9d4b7 100755 --- a/images/createDigitalOceanImage.sh +++ b/images/createDigitalOceanImage.sh @@ -10,6 +10,7 @@ readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly INSTALLER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)" readonly JSON="${INSTALLER_DIR}/node_modules/.bin/json" +provider="digitalocean" installer_revision=$(git rev-parse HEAD) box_size="512mb" image_regions=(sfo1 ams3) @@ -24,7 +25,7 @@ deploy_env="dev" [[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt" readonly GNU_GETOPT -args=$(${GNU_GETOPT} -o "" -l "revision:,regions:,size:,box:,no-destroy,env:" -n "$0" -- "$@") +args=$(${GNU_GETOPT} -o "" -l "provider:,revision:,regions:,size:,box:,no-destroy,env:" -n "$0" -- "$@") eval set -- "${args}" while true; do @@ -33,6 +34,7 @@ while true; do --revision) installer_revision="$2"; shift 2;; --regions) image_regions=("$2"); shift 2;; # parse as whitespace separated array --size) box_size="$2"; shift 2;; + --provider) provider="$2"; shift 2;; --box) box_name="$2"; destroy_server="no"; shift 2;; --no-destroy) destroy_server="no"; shift 2;; --) break;; @@ -40,21 +42,26 @@ while true; do esac done -# set DO token, picked up by digitalOceanFunctions.sh -if [[ "${deploy_env}" == "staging" ]]; then - assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}" -elif [[ "${deploy_env}" == "dev" ]]; then - assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}" -elif [[ "${deploy_env}" == "prod" ]]; then - assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}" +if [[ "${provider}" == "digitalocean" ]]; then + # set DO token, 
picked up by digitalOceanFunctions.sh + if [[ "${deploy_env}" == "staging" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}" + elif [[ "${deploy_env}" == "dev" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}" + elif [[ "${deploy_env}" == "prod" ]]; then + assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD + readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}" + else + echo "No such env ${deploy_env}." + exit 1 + fi + source "${SCRIPT_DIR}/digitalOceanFunctions.sh" else - echo "No such env ${deploy_env}." + echo "Unknown provider : ${provider}" exit 1 fi -source "${SCRIPT_DIR}/digitalOceanFunctions.sh" readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}" readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}" From c9053bb0bc1ade2716e1cb187691ada4912c69ba Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 08:39:21 -0800 Subject: [PATCH 175/234] rename image creation schript --- images/{createDigitalOceanImage.sh => createImage} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename images/{createDigitalOceanImage.sh => createImage} (100%) diff --git a/images/createDigitalOceanImage.sh b/images/createImage similarity index 100% rename from images/createDigitalOceanImage.sh rename to images/createImage From b37670de8407880b2f7f89078a2ee496d68b6e23 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 08:55:19 -0800 Subject: [PATCH 176/234] make DO backend a binary --- images/createImage | 34 ++++++------- ...gitalOceanFunctions.sh => digitalocean.sh} | 49 +++++++++++++++++++ 2 files changed, 66 insertions(+), 17 deletions(-) rename images/{digitalOceanFunctions.sh => digitalocean.sh} (88%) mode change 100644 => 100755 diff --git a/images/createImage b/images/createImage index b99f9d4b7..e31e4c8b0 100755 --- a/images/createImage 
+++ b/images/createImage @@ -8,7 +8,7 @@ assertNotEmpty() { readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly INSTALLER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)" -readonly JSON="${INSTALLER_DIR}/node_modules/.bin/json" +export JSON="${INSTALLER_DIR}/node_modules/.bin/json" provider="digitalocean" installer_revision=$(git rev-parse HEAD) @@ -43,21 +43,21 @@ while true; do done if [[ "${provider}" == "digitalocean" ]]; then - # set DO token, picked up by digitalOceanFunctions.sh if [[ "${deploy_env}" == "staging" ]]; then assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}" + export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}" elif [[ "${deploy_env}" == "dev" ]]; then assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}" + export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}" elif [[ "${deploy_env}" == "prod" ]]; then assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD - readonly DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}" + export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}" else echo "No such env ${deploy_env}." 
exit 1 fi - source "${SCRIPT_DIR}/digitalOceanFunctions.sh" + + vps="${SCRIPT_DIR}/digitalocean.sh" else echo "Unknown provider : ${provider}" exit 1 @@ -90,7 +90,7 @@ if [[ -z "${box_name}" ]]; then box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes # create a new server if no name given - caas_ssh_key_id=$(get_ssh_key_id "caas") + caas_ssh_key_id=$($vps get_ssh_key_id "caas") if [[ -z "${caas_ssh_key_id}" ]]; then echo "Could not query caas ssh key" exit 1 @@ -98,7 +98,7 @@ if [[ -z "${box_name}" ]]; then echo "Detected caas ssh key id: ${caas_ssh_key_id}" echo "Creating Server with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" - server_id=$(create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) + server_id=$($vps create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) if [[ -z "${server_id}" ]]; then echo "Failed to create server" exit 1 @@ -113,16 +113,16 @@ if [[ -z "${box_name}" ]]; then done echo "" else - server_id=$(get_droplet_id "${box_name}") + server_id=$($vps get_droplet_id "${box_name}") echo "Reusing server with id: ${server_id}" - power_on_droplet "${server_id}" + $vps power_on_droplet "${server_id}" fi # Query DO until we get an IP while true; do echo "Trying to get the server IP" - server_ip=$(get_droplet_ip "${server_id}") + server_ip=$($vps get_droplet_ip "${server_id}") if [[ "${server_ip}" != "" ]]; then echo "Server IP : [${server_ip}]" break @@ -167,18 +167,18 @@ echo "Waiting for 10 seconds for server to shutdown" sleep 30 echo "Powering off server" -power_off_droplet "${server_id}" +$vps power_off_droplet "${server_id}" snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" echo "Snapshotting as ${snapshot_name}" -snapshot_droplet "${server_id}" "${snapshot_name}" +$vps snapshot_droplet "${server_id}" "${snapshot_name}" -image_id=$(get_image_id "${snapshot_name}") +image_id=$($vps get_image_id "${snapshot_name}") echo "Image id is ${image_id}" 
if [[ "${destroy_server}" == "yes" ]]; then echo "Destroying server" - destroy_droplet "${server_id}" + $vps destroy_droplet "${server_id}" else echo "Skipping server destroy" fi @@ -187,7 +187,7 @@ echo "Transferring image to other regions" xfer_events=() # skip the first region, as the image was created there for image_region in ${image_regions[@]:1}; do - xfer_event=$(transfer_image ${image_id} ${image_region}) + xfer_event=$($vps transfer_image ${image_id} ${image_region}) echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}" xfer_events+=("${xfer_event}") sleep 1 @@ -196,7 +196,7 @@ done echo "Image transfer initiated, but they will take some time to get transferred." for xfer_event in ${xfer_events[@]}; do - wait_for_image_event "${image_id}" "${xfer_event}" + $vps wait_for_image_event "${image_id}" "${xfer_event}" done echo "Done." diff --git a/images/digitalOceanFunctions.sh b/images/digitalocean.sh old mode 100644 new mode 100755 similarity index 88% rename from images/digitalOceanFunctions.sh rename to images/digitalocean.sh index 63fb18a9e..6499cccd4 --- a/images/digitalOceanFunctions.sh +++ b/images/digitalocean.sh @@ -159,3 +159,52 @@ function wait_for_image_event() { echo "" } +if [[ $# -lt 1 ]]; then + echo " " +fi + +case $1 in +get_ssh_key_id) + get_ssh_key_id "${@:2}" + ;; + +create_droplet) + create_droplet "${@:2}" + ;; + +get_droplet_id) + get_droplet_id "${@:2}" + ;; + +get_droplet_ip) + get_droplet_ip "${@:2}" + ;; + +power_on_droplet) + power_on_droplet "${@:2}" + ;; + +power_off_droplet) + power_off_droplet "${@:2}" + ;; + +snapshot_droplet) + snapshot_droplet "${@:2}" + ;; + +destroy_droplet) + destroy_droplet "${@:2}" + ;; + +wait_for_image_event) + wait_for_image_event "${@:2}" + ;; + +transfer_image) + transfer_image "${@:2}" + ;; + +*) + echo "Unknown command $1" + exit 1 +esac From a6415b8689cb5788ec9d522a5d7b86fad1187bad Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 09:13:30 -0800 
Subject: [PATCH 177/234] remove droplet from command names --- images/createImage | 15 +++++++-------- images/digitalocean.sh | 14 +++++++------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/images/createImage b/images/createImage index e31e4c8b0..eb051d4f3 100755 --- a/images/createImage +++ b/images/createImage @@ -98,7 +98,7 @@ if [[ -z "${box_name}" ]]; then echo "Detected caas ssh key id: ${caas_ssh_key_id}" echo "Creating Server with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" - server_id=$($vps create_droplet ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) + server_id=$($vps create ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) if [[ -z "${server_id}" ]]; then echo "Failed to create server" exit 1 @@ -113,16 +113,16 @@ if [[ -z "${box_name}" ]]; then done echo "" else - server_id=$($vps get_droplet_id "${box_name}") + server_id=$($vps get_id "${box_name}") echo "Reusing server with id: ${server_id}" - $vps power_on_droplet "${server_id}" + $vps power_on "${server_id}" fi # Query DO until we get an IP while true; do echo "Trying to get the server IP" - server_ip=$($vps get_droplet_ip "${server_id}") + server_ip=$($vps get_ip "${server_id}") if [[ "${server_ip}" != "" ]]; then echo "Server IP : [${server_ip}]" break @@ -167,18 +167,18 @@ echo "Waiting for 10 seconds for server to shutdown" sleep 30 echo "Powering off server" -$vps power_off_droplet "${server_id}" +$vps power_off "${server_id}" snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" echo "Snapshotting as ${snapshot_name}" -$vps snapshot_droplet "${server_id}" "${snapshot_name}" +$vps snapshot "${server_id}" "${snapshot_name}" image_id=$($vps get_image_id "${snapshot_name}") echo "Image id is ${image_id}" if [[ "${destroy_server}" == "yes" ]]; then echo "Destroying server" - $vps destroy_droplet "${server_id}" + $vps destroy "${server_id}" else echo "Skipping server destroy" fi @@ -200,4 +200,3 @@ for xfer_event in 
${xfer_events[@]}; do done echo "Done." - diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 6499cccd4..29645250c 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -168,31 +168,31 @@ get_ssh_key_id) get_ssh_key_id "${@:2}" ;; -create_droplet) +create) create_droplet "${@:2}" ;; -get_droplet_id) +get_id) get_droplet_id "${@:2}" ;; -get_droplet_ip) +get_ip) get_droplet_ip "${@:2}" ;; -power_on_droplet) +power_on) power_on_droplet "${@:2}" ;; -power_off_droplet) +power_off) power_off_droplet "${@:2}" ;; -snapshot_droplet) +snapshot) snapshot_droplet "${@:2}" ;; -destroy_droplet) +destroy) destroy_droplet "${@:2}" ;; From 3c85a602a4c513ddf59c171eaf68e055a2fe6d47 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 09:21:10 -0800 Subject: [PATCH 178/234] add vultr backend --- images/createImage | 2 +- images/vultr.js | 67 ++++++++++++++++++++++++++++++++++++++++++++++ package.json | 2 ++ 3 files changed, 70 insertions(+), 1 deletion(-) create mode 100755 images/vultr.js diff --git a/images/createImage b/images/createImage index eb051d4f3..f3bafe819 100755 --- a/images/createImage +++ b/images/createImage @@ -1,6 +1,6 @@ #!/bin/bash -set -eu -o pipefail +set -eux -o pipefail assertNotEmpty() { : "${!1:? "$1 is not set."}" diff --git a/images/vultr.js b/images/vultr.js new file mode 100755 index 000000000..b22072b5e --- /dev/null +++ b/images/vultr.js @@ -0,0 +1,67 @@ +#!/usr/bin/env node + +'use strict'; + +require('colors'); + +var request = require('superagent-sync'); + +function exit(error, result) { + if (error) console.error(error.message.red); + if (result) console.log(result); + + process.exit(error ? 
1 : 0); +} + +var gApiToken = process.env.API_TOKEN; +if (!gApiToken) exit('Script requires API_TOKEN env to be set'); + +if (process.argv.length < 3) { + exit('Usage: vultr '); +} + +function getSshKeyId(keyName) { + request.get('https://api.vultr.com/v1/sshkey/list') + .query({ api_key : gApiToken }) + .end(function (error, res) { + + if (error) exit(error); + + var allKeys = Object.keys(res.body); + for (var i = 0; i < allKeys.length; i++) { + if (keyName === allKeys[i]) exit(null, res.body[keyName].key); + } + + exit(new Error('key not found')); + }); +} + +switch (process.argv[2]) { +case 'get_ssh_key_id': + getSshKeyId(process.argv[3]); + break; + +case 'create': + +case 'get_id': + +case 'get_ip': + +case 'power_on': + +case 'power_off': + +case 'snapshot': + +case 'destroy': + +case 'wait_for_image_event': + +case 'transfer_image': + exit(new Error('Unimplemented command:' + process.argv[2])); + + break; + +default: + exit(new Error('Unknown command:' + process.argv[2])); +} diff --git a/package.json b/package.json index 4d3b042e9..b06d2b7cc 100644 --- a/package.json +++ b/package.json @@ -25,12 +25,14 @@ "superagent": "^0.21.0" }, "devDependencies": { + "colors": "^1.1.2", "commander": "^2.8.1", "expect.js": "^0.3.1", "istanbul": "^0.3.5", "lodash": "^3.2.0", "mocha": "^2.1.0", "nock": "^0.59.1", + "superagent-sync": "^0.2.0", "supererror": "^0.7.0", "yesno": "0.0.1" }, From 114951b18c3889d2d0481b57fed5513f73913f60 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 09:31:42 -0800 Subject: [PATCH 179/234] add get_image_id command --- images/digitalocean.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 29645250c..f5089413b 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -200,6 +200,10 @@ wait_for_image_event) wait_for_image_event "${@:2}" ;; +get_image_id) + get_image_id "${@:2}" + ;; + transfer_image) transfer_image "${@:2}" ;; From 
7104a3b7384560e715acee1a88588220d6a1d466 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 10:14:40 -0800 Subject: [PATCH 180/234] use debug to put messages in stderr --- images/digitalocean.sh | 47 +++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/images/digitalocean.sh b/images/digitalocean.sh index f5089413b..b5cf74f7e 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -12,6 +12,10 @@ fi readonly CURL="curl -s -u ${DIGITAL_OCEAN_TOKEN}:" +function debug() { + echo "$@" >&2 +} + function get_ssh_key_id() { $CURL "https://api.digitalocean.com/v2/account/keys" \ | $JSON ssh_keys \ @@ -49,23 +53,23 @@ function power_off_droplet() { local event_id=`echo "${response}" | $JSON action.id` if [[ -z "${event_id}" ]]; then - echo "Got no event id, assuming already powered off." - echo "Response: ${response}" + debug "Got no event id, assuming already powered off." + debug "Response: ${response}" return fi - echo "Powered off droplet. Event id: ${event_id}" - echo -n "Waiting for droplet to power off" + debug "Powered off droplet. Event id: ${event_id}" + debug -n "Waiting for droplet to power off" while true; do local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` if [[ "${event_status}" == "completed" ]]; then break fi - echo -n "." + debug -n "." sleep 10 done - echo "" + debug "" } function power_on_droplet() { @@ -73,24 +77,24 @@ function power_on_droplet() { local data='{"type":"power_on"}' local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id` - echo "Powered on droplet. Event id: ${event_id}" + debug "Powered on droplet. 
Event id: ${event_id}" if [[ -z "${event_id}" ]]; then - echo "Got no event id, assuming already powered on" + debug "Got no event id, assuming already powered on" return fi - echo -n "Waiting for droplet to power on" + debug -n "Waiting for droplet to power on" while true; do local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` if [[ "${event_status}" == "completed" ]]; then break fi - echo -n "." + debug -n "." sleep 10 done - echo "" + debug "" } function snapshot_droplet() { @@ -99,26 +103,26 @@ function snapshot_droplet() { local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}" local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id` - echo "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}" - echo -n "Waiting for snapshot to complete" + debug "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}" + debug -n "Waiting for snapshot to complete" while true; do local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status` if [[ "${event_status}" == "completed" ]]; then break fi - echo -n "." + debug -n "." sleep 10 done - echo "" + debug "" } function destroy_droplet() { local droplet_id="$1" # TODO: check for 204 status $CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}" - echo "Droplet destroyed" - echo "" + debug "Droplet destroyed" + debug "" } function transfer_image() { @@ -146,21 +150,22 @@ function wait_for_image_event() { local image_id="$1" local event_id="$2" - echo -n "Waiting for ${event_id}" + debug -n "Waiting for ${event_id}" while true; do local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status` if [[ "${event_status}" == "completed" ]]; then break fi - echo -n "." + debug -n "." 
sleep 10 done - echo "" + debug "" } if [[ $# -lt 1 ]]; then - echo " " + debug " " + exit 1 fi case $1 in From f1ec110673dd47e2b909a0cce2deed2a56e18660 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 10:27:27 -0800 Subject: [PATCH 181/234] vultr: getSshKeyId --- images/createImage | 5 ++++- images/vultr.js | 27 +++++++++++++-------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/images/createImage b/images/createImage index f3bafe819..2da2cb7ac 100755 --- a/images/createImage +++ b/images/createImage @@ -1,6 +1,6 @@ #!/bin/bash -set -eux -o pipefail +set -eu -o pipefail assertNotEmpty() { : "${!1:? "$1 is not set."}" @@ -58,6 +58,9 @@ if [[ "${provider}" == "digitalocean" ]]; then fi vps="${SCRIPT_DIR}/digitalocean.sh" +elif [[ "${provider}" == "vultr" ]]; then + export VULTR_TOKEN="${VULTR_TOKEN}" + vps="${SCRIPT_DIR}/vultr.js" else echo "Unknown provider : ${provider}" exit 1 diff --git a/images/vultr.js b/images/vultr.js index b22072b5e..fb9aff118 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -13,32 +13,31 @@ function exit(error, result) { process.exit(error ? 
1 : 0); } -var gApiToken = process.env.API_TOKEN; -if (!gApiToken) exit('Script requires API_TOKEN env to be set'); +var gApiToken = process.env.VULTR_TOKEN; +if (!gApiToken) exit(new Error('Script requires VULTR_TOKEN env to be set')); if (process.argv.length < 3) { - exit('Usage: vultr '); + exit(new Error('Usage: vultr ')); } -function getSshKeyId(keyName) { - request.get('https://api.vultr.com/v1/sshkey/list') +function getSshKeyId(keyName, callback) { + var res = request.get('https://api.vultr.com/v1/sshkey/list') .query({ api_key : gApiToken }) - .end(function (error, res) { + .end(); - if (error) exit(error); + if (res.statusCode !== 200) exit(new Error('Invalid response')); - var allKeys = Object.keys(res.body); - for (var i = 0; i < allKeys.length; i++) { - if (keyName === allKeys[i]) exit(null, res.body[keyName].key); - } + var allKeyIds = Object.keys(res.body); + for (var i = 0; i < allKeyIds.length; i++) { + if (keyName === res.body[allKeyIds[i]].name) return callback(null, allKeyIds[i]); // also SSHKEYID + } - exit(new Error('key not found')); - }); + callback(new Error('key not found')); } switch (process.argv[2]) { case 'get_ssh_key_id': - getSshKeyId(process.argv[3]); + getSshKeyId(process.argv[3], exit); break; case 'create': From cee9cd14c04e079712db936a9f5c41747977c95c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 10:46:16 -0800 Subject: [PATCH 182/234] hardcode the box size to smallest --- images/createImage | 6 ++---- images/digitalocean.sh | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/images/createImage b/images/createImage index 2da2cb7ac..4014a4046 100755 --- a/images/createImage +++ b/images/createImage @@ -12,7 +12,6 @@ export JSON="${INSTALLER_DIR}/node_modules/.bin/json" provider="digitalocean" installer_revision=$(git rev-parse HEAD) -box_size="512mb" image_regions=(sfo1 ams3) box_name="" server_id="" @@ -33,7 +32,6 @@ while true; do --env) deploy_env="$2"; shift 2;; --revision) 
installer_revision="$2"; shift 2;; --regions) image_regions=("$2"); shift 2;; # parse as whitespace separated array - --size) box_size="$2"; shift 2;; --provider) provider="$2"; shift 2;; --box) box_name="$2"; destroy_server="no"; shift 2;; --no-destroy) destroy_server="no"; shift 2;; @@ -100,8 +98,8 @@ if [[ -z "${box_name}" ]]; then fi echo "Detected caas ssh key id: ${caas_ssh_key_id}" - echo "Creating Server with name [${box_name}] at [${image_regions[0]}] with size [${box_size}]" - server_id=$($vps create ${caas_ssh_key_id} ${box_name} ${box_size} ${image_regions[0]}) + echo "Creating Server with name [${box_name}] at [${image_regions[0]}]" + server_id=$($vps create ${caas_ssh_key_id} ${box_name} ${image_regions[0]}) if [[ -z "${server_id}" ]]; then echo "Failed to create server" exit 1 diff --git a/images/digitalocean.sh b/images/digitalocean.sh index b5cf74f7e..812235d2a 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -26,10 +26,10 @@ function get_ssh_key_id() { function create_droplet() { local ssh_key_id="$1" local box_name="$2" - local box_size="$3" - local image_region="$4" + local image_region="$3" local ubuntu_image_slug="ubuntu-15-04-x64" # id=12658446 + local box_size="512mb" local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" From dae2d81764a4c0b0c11deebbdf71841d8e691433 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 10:49:09 -0800 Subject: [PATCH 183/234] remove image_region as well --- images/createImage | 7 +++---- images/digitalocean.sh | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/images/createImage b/images/createImage index 4014a4046..c8c2b1c6a 100755 --- a/images/createImage +++ b/images/createImage @@ -12,7 +12,6 @@ export JSON="${INSTALLER_DIR}/node_modules/.bin/json" provider="digitalocean" installer_revision=$(git rev-parse HEAD) 
-image_regions=(sfo1 ams3) box_name="" server_id="" server_ip="" @@ -31,7 +30,6 @@ while true; do case "$1" in --env) deploy_env="$2"; shift 2;; --revision) installer_revision="$2"; shift 2;; - --regions) image_regions=("$2"); shift 2;; # parse as whitespace separated array --provider) provider="$2"; shift 2;; --box) box_name="$2"; destroy_server="no"; shift 2;; --no-destroy) destroy_server="no"; shift 2;; @@ -98,8 +96,8 @@ if [[ -z "${box_name}" ]]; then fi echo "Detected caas ssh key id: ${caas_ssh_key_id}" - echo "Creating Server with name [${box_name}] at [${image_regions[0]}]" - server_id=$($vps create ${caas_ssh_key_id} ${box_name} ${image_regions[0]}) + echo "Creating Server with name [${box_name}]" + server_id=$($vps create ${caas_ssh_key_id} ${box_name}) if [[ -z "${server_id}" ]]; then echo "Failed to create server" exit 1 @@ -186,6 +184,7 @@ fi echo "Transferring image to other regions" xfer_events=() +image_regions=(sfo1 ams3) # remove this # skip the first region, as the image was created there for image_region in ${image_regions[@]:1}; do xfer_event=$($vps transfer_image ${image_id} ${image_region}) diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 812235d2a..27a19b3b1 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -26,8 +26,8 @@ function get_ssh_key_id() { function create_droplet() { local ssh_key_id="$1" local box_name="$2" - local image_region="$3" + local image_region="sfo1" local ubuntu_image_slug="ubuntu-15-04-x64" # id=12658446 local box_size="512mb" From ba4edc5c0ef9601c73fed0c6d39dc559ee45ccf1 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 11:01:52 -0800 Subject: [PATCH 184/234] implement some vultr api --- images/vultr.js | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/images/vultr.js b/images/vultr.js index fb9aff118..307ba9db1 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -35,16 +35,50 @@ function 
getSshKeyId(keyName, callback) { callback(new Error('key not found')); } +function create(keyId, name, callback) { + var regionId = 5; // LA (https://api.vultr.com/v1/regions/list) + var planId = 29; // 768MB RAM (https://api.vultr.com/v1/regions/list) + var osid = 191; // Ubuntu 15.04 x64 (see https://api.vultr.com/v1/os/list) + + var res = request.post('https://api.vultr.com/v1/server/create') + .query({ api_key : gApiToken }) + .type('form') + .send({ DCID: regionId, VPSPLANID: planId, OSID : osid, label: name, SSHKEYID: keyId }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid response creating server')); + + return callback(null, res.body.SUBID); +} + +function getIp(id, callback){ + var res = request.post('https://api.vultr.com/v1/server/list') + .query({ api_key : gApiToken }) + .end(); + + var info = res.body[id]; + if (!info) return callback(new Error('Invalid response querying IP')); + + if (info.power_status !== 'running' || info.server_state !== 'ok' || info.status !== 'active') return callback(new Error('Server is not up yet')); + + return callback(null, info.main_ip); +} + switch (process.argv[2]) { case 'get_ssh_key_id': getSshKeyId(process.argv[3], exit); break; case 'create': + create(process.argv[3], process.argv[4], exit); + break; + +case 'get_ip': + getIp(process.argv[3], exit); + break; case 'get_id': -case 'get_ip': case 'power_on': From ef00114aabbb86410ba513749fc00dc89a7c54e9 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 11:20:21 -0800 Subject: [PATCH 185/234] rename arg box to name --- images/createImage | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/images/createImage b/images/createImage index c8c2b1c6a..e7e476f71 100755 --- a/images/createImage +++ b/images/createImage @@ -23,7 +23,7 @@ deploy_env="dev" [[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt" readonly GNU_GETOPT -args=$(${GNU_GETOPT} -o "" -l 
"provider:,revision:,regions:,size:,box:,no-destroy,env:" -n "$0" -- "$@") +args=$(${GNU_GETOPT} -o "" -l "provider:,revision:,regions:,size:,name:,no-destroy,env:" -n "$0" -- "$@") eval set -- "${args}" while true; do @@ -31,7 +31,7 @@ while true; do --env) deploy_env="$2"; shift 2;; --revision) installer_revision="$2"; shift 2;; --provider) provider="$2"; shift 2;; - --box) box_name="$2"; destroy_server="no"; shift 2;; + --name) box_name="$2"; destroy_server="no"; shift 2;; --no-destroy) destroy_server="no"; shift 2;; --) break;; *) echo "Unknown option $1"; exit 1;; @@ -118,7 +118,7 @@ else $vps power_on "${server_id}" fi -# Query DO until we get an IP +# Query until we get an IP while true; do echo "Trying to get the server IP" server_ip=$($vps get_ip "${server_id}") From a6b7b5fa9478d2bf55d5d5737dc70d063f763891 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 11:30:24 -0800 Subject: [PATCH 186/234] complete vultr backend --- images/vultr.js | 84 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 80 insertions(+), 4 deletions(-) diff --git a/images/vultr.js b/images/vultr.js index 307ba9db1..c15c7727e 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -52,11 +52,15 @@ function create(keyId, name, callback) { } function getIp(id, callback){ - var res = request.post('https://api.vultr.com/v1/server/list') - .query({ api_key : gApiToken }) + console.error('getting the ip of ', id); + + var res = request.get('https://api.vultr.com/v1/server/list') + .query({ api_key : gApiToken, SUBID: id }) .end(); - var info = res.body[id]; + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode querying IP')); + + var info = res.body; if (!info) return callback(new Error('Invalid response querying IP')); if (info.power_status !== 'running' || info.server_state !== 'ok' || info.status !== 'active') return callback(new Error('Server is not up yet')); @@ -64,6 +68,69 @@ function getIp(id, callback){ return 
callback(null, info.main_ip); } +function getId(name, callback) { + var res = request.get('https://api.vultr.com/v1/server/list') + .query({ api_key : gApiToken }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode querying id')); + + var serverIds = Object.keys(res.body); + for (var i = 0; i < serverIds.length; i++) { + if (res.body[serverIds[i]].label === name) return callback(null, serverIds[0]); + } + + callback(new Error('no server with id found')); +} + +function powerOn(id, callback) { + var res = request.post('https://api.vultr.com/v1/server/start') + .query({ api_key : gApiToken }) + .type('form') + .send({ SUBID: id }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode powering on')); + + callback(null); +} + +function powerOff(id, callback) { + var res = request.post('https://api.vultr.com/v1/server/halt') + .query({ api_key : gApiToken }) + .type('form') + .send({ SUBID: id }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode powering off')); + + callback(null); +} + +function snapshot(id, name, callback) { + var res = request.post('https://api.vultr.com/v1/snapshot/create') + .query({ api_key : gApiToken }) + .type('form') + .send({ SUBID: id, description: name }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode powering off')); + + callback(null, res.body.SNAPSHOTID); +} + +function destroy(id, callback) { + var res = request.post('https://api.vultr.com/v1/server/destroy') + .query({ api_key : gApiToken }) + .type('form') + .send({ SUBID: id }) + .end(); + + if (res.statusCode !== 200) return callback(new Error('Invalid statusCode powering off')); + + callback(); +} + switch (process.argv[2]) { case 'get_ssh_key_id': getSshKeyId(process.argv[3], exit); @@ -78,15 +145,24 @@ case 'get_ip': break; case 'get_id': - + getId(process.argv[3], exit); + break; case 'power_on': + powerOn(process.argv[3], exit); + 
break; case 'power_off': + powerOff(process.argv[3], exit); + break; case 'snapshot': + snapshot(process.argv[3], process.argv[4], exit); + break; case 'destroy': + destroy(process.argv[3], exit); + break; case 'wait_for_image_event': From e982281cd44974501f515bbf76b7329f73ada07d Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 11:32:05 -0800 Subject: [PATCH 187/234] install acl --- images/initializeBaseUbuntuImage.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 638e6d9fa..04d5802ea 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -255,6 +255,7 @@ timedatectl set-ntp 1 timedatectl set-timezone UTC # Give user access to system logs +apt-get -y install acl usermod -a -G systemd-journal ${USER} setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal From 56dd936e9c7612bedb5be963d240d0b8a280f386 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 12:33:31 -0800 Subject: [PATCH 188/234] create systemd log dir if needed --- images/initializeBaseUbuntuImage.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 04d5802ea..7596e423a 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -257,6 +257,9 @@ timedatectl set-timezone UTC # Give user access to system logs apt-get -y install acl usermod -a -G systemd-journal ${USER} +mkdir -p /var/log/journal # in some images, this directory is not created making system log to /run/systemd instead +chown root:systemd-journal /var/log/journal +systemctl restart systemd-journald setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal echo "==== Install ssh ===" From 9abc5bbf969603ae10c5147d19a85304ca2f4b5e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 12:31:58 -0800 Subject: [PATCH 189/234] better error 
handling --- images/createImage | 34 +++++++++++++++++++++++----------- images/digitalocean.sh | 18 +++++++++++++----- images/vultr.js | 6 ++---- 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/images/createImage b/images/createImage index e7e476f71..ec76b96a4 100755 --- a/images/createImage +++ b/images/createImage @@ -89,16 +89,14 @@ if [[ -z "${box_name}" ]]; then box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes # create a new server if no name given - caas_ssh_key_id=$($vps get_ssh_key_id "caas") - if [[ -z "${caas_ssh_key_id}" ]]; then + if ! caas_ssh_key_id=$($vps get_ssh_key_id "caas"); then echo "Could not query caas ssh key" exit 1 fi echo "Detected caas ssh key id: ${caas_ssh_key_id}" echo "Creating Server with name [${box_name}]" - server_id=$($vps create ${caas_ssh_key_id} ${box_name}) - if [[ -z "${server_id}" ]]; then + if ! server_id=$($vps create ${caas_ssh_key_id} ${box_name}); then echo "Failed to create server" exit 1 fi @@ -112,7 +110,10 @@ if [[ -z "${box_name}" ]]; then done echo "" else - server_id=$($vps get_id "${box_name}") + if ! server_id=$($vps get_id "${box_name}"); then + echo "Could not determine id from name" + exit 1 + fi echo "Reusing server with id: ${server_id}" $vps power_on "${server_id}" @@ -121,8 +122,7 @@ fi # Query until we get an IP while true; do echo "Trying to get the server IP" - server_ip=$($vps get_ip "${server_id}") - if [[ "${server_ip}" != "" ]]; then + if server_ip=$($vps get_ip "${server_id}"); then echo "Server IP : [${server_ip}]" break fi @@ -166,18 +166,30 @@ echo "Waiting for 10 seconds for server to shutdown" sleep 30 echo "Powering off server" -$vps power_off "${server_id}" +if ! $vps power_off "${server_id}"; then + echo "Could not power off server" + exit 1 +fi snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" echo "Snapshotting as ${snapshot_name}" -$vps snapshot "${server_id}" "${snapshot_name}" +if ! 
$vps snapshot "${server_id}" "${snapshot_name}"; then + echo "Could not snapshot" + exit 1 +fi -image_id=$($vps get_image_id "${snapshot_name}") +if ! image_id=$($vps get_image_id "${snapshot_name}"); then + echo "Could not get image id" + exit 1 +fi echo "Image id is ${image_id}" if [[ "${destroy_server}" == "yes" ]]; then echo "Destroying server" - $vps destroy "${server_id}" + if ! $vps destroy "${server_id}"; then + echo "Could not destroy server" + exit 1 + fi else echo "Skipping server destroy" fi diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 27a19b3b1..9ce1b7938 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -17,10 +17,12 @@ function debug() { } function get_ssh_key_id() { - $CURL "https://api.digitalocean.com/v2/account/keys" \ + id=$($CURL "https://api.digitalocean.com/v2/account/keys" \ | $JSON ssh_keys \ | $JSON -c "this.name === \"$1\"" \ - | $JSON 0.id + | $JSON 0.id) + [[ -z "$id" ]] && exit 1 + echo "$id" } function create_droplet() { @@ -33,17 +35,23 @@ function create_droplet() { local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" - $CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id + id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id) + [[ -z "$id" ]] && exit 1 + echo "$id" } function get_droplet_ip() { local droplet_id="$1" - $CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address" + ip=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address") + [[ -z "$ip" ]] && exit 1 + echo "$ip" } function get_droplet_id() { local droplet_name="$1" - $CURL "https://api.digitalocean.com/v2/droplets?per_page=100" | $JSON "droplets" | $JSON 
-c "this.name === '${droplet_name}'" | $JSON "[0].id" + id=$($CURL "https://api.digitalocean.com/v2/droplets?per_page=100" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id") + [[ -z "$id" ]] && exit 1 + echo "$id" } function power_off_droplet() { diff --git a/images/vultr.js b/images/vultr.js index c15c7727e..46bf953c0 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -38,7 +38,7 @@ function getSshKeyId(keyName, callback) { function create(keyId, name, callback) { var regionId = 5; // LA (https://api.vultr.com/v1/regions/list) var planId = 29; // 768MB RAM (https://api.vultr.com/v1/regions/list) - var osid = 191; // Ubuntu 15.04 x64 (see https://api.vultr.com/v1/os/list) + var osid = 191; // Ubuntu 15.04 x64 (see https://api.vultr.com/v1/os/list). 15.04 has some systemd issue var res = request.post('https://api.vultr.com/v1/server/create') .query({ api_key : gApiToken }) @@ -51,9 +51,7 @@ function create(keyId, name, callback) { return callback(null, res.body.SUBID); } -function getIp(id, callback){ - console.error('getting the ip of ', id); - +function getIp(id, callback) { var res = request.get('https://api.vultr.com/v1/server/list') .query({ api_key : gApiToken, SUBID: id }) .end(); From 44b728c6601efcd56d02d1b027432a001dbf747d Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 12:45:09 -0800 Subject: [PATCH 190/234] remove get_image_id api --- images/createImage | 10 ++-------- images/digitalocean.sh | 32 +++++++++++++++----------------- 2 files changed, 17 insertions(+), 25 deletions(-) diff --git a/images/createImage b/images/createImage index ec76b96a4..559ef16c8 100755 --- a/images/createImage +++ b/images/createImage @@ -173,17 +173,11 @@ fi snapshot_name="box-${deploy_env}-${pretty_revision}-${now}" echo "Snapshotting as ${snapshot_name}" -if ! $vps snapshot "${server_id}" "${snapshot_name}"; then - echo "Could not snapshot" +if ! 
image_id=$($vps snapshot "${server_id}" "${snapshot_name}"); then + echo "Could not snapshot and get image id" exit 1 fi -if ! image_id=$($vps get_image_id "${snapshot_name}"); then - echo "Could not get image id" - exit 1 -fi -echo "Image id is ${image_id}" - if [[ "${destroy_server}" == "yes" ]]; then echo "Destroying server" if ! $vps destroy "${server_id}"; then diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 9ce1b7938..3d54e1208 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -105,6 +105,19 @@ function power_on_droplet() { debug "" } +function get_image_id() { + local snapshot_name="$1" + local image_id="" + + image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \ + | $JSON images \ + | $JSON -c "this.name === \"${snapshot_name}\"" 0.id) + + if [[ -n "${image_id}" ]]; then + echo "${image_id}" + fi +} + function snapshot_droplet() { local droplet_id="$1" local snapshot_name="$2" @@ -123,6 +136,8 @@ function snapshot_droplet() { sleep 10 done debug "" + + get_image_id "${snapshot_name}" } function destroy_droplet() { @@ -141,19 +156,6 @@ function transfer_image() { echo "${event_id}" } -function get_image_id() { - local snapshot_name="$1" - local image_id="" - - image_id=$($CURL "https://api.digitalocean.com/v2/images?per_page=100" \ - | $JSON images \ - | $JSON -c "this.name === \"${snapshot_name}\"" 0.id) - - if [[ -n "${image_id}" ]]; then - echo "${image_id}" - fi -} - function wait_for_image_event() { local image_id="$1" local event_id="$2" @@ -213,10 +215,6 @@ wait_for_image_event) wait_for_image_event "${@:2}" ;; -get_image_id) - get_image_id "${@:2}" - ;; - transfer_image) transfer_image "${@:2}" ;; From 50f7209ba2d56c296c8526a9001962728686d22f Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 12:46:08 -0800 Subject: [PATCH 191/234] print the provider --- images/createImage | 1 + 1 file changed, 1 insertion(+) diff --git a/images/createImage b/images/createImage index 
559ef16c8..9d0fbf914 100755 --- a/images/createImage +++ b/images/createImage @@ -38,6 +38,7 @@ while true; do esac done +echo "Creating image using ${provider}" if [[ "${provider}" == "digitalocean" ]]; then if [[ "${deploy_env}" == "staging" ]]; then assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING From 989730d402f68b4d001da5568728944bd8a55921 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 13:15:56 -0800 Subject: [PATCH 192/234] wait for snapshot --- images/vultr.js | 25 ++++++++++++++++++++++++- package.json | 1 + 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/images/vultr.js b/images/vultr.js index 46bf953c0..79d2d8291 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -4,7 +4,8 @@ require('colors'); -var request = require('superagent-sync'); +var request = require('superagent-sync'), + sleep = require('sleep').sleep; function exit(error, result) { if (error) console.error(error.message.red); @@ -105,6 +106,23 @@ function powerOff(id, callback) { callback(null); } +function waitForSnapshot(id) { + var res = request.post('https://api.vultr.com/v1/snapshot/list') + .query({ api_key : gApiToken }) + .end(); + + if (res.statusCode !== 200) { + console.error('Invalid statusCode waiting for snapshot'); + return false; + } + + if (res.body[id].status === 'complete') return true; + + console.error('snapshot not complete : ' + res.body[id].status); + + return false; +} + function snapshot(id, name, callback) { var res = request.post('https://api.vultr.com/v1/snapshot/create') .query({ api_key : gApiToken }) @@ -114,6 +132,11 @@ function snapshot(id, name, callback) { if (res.statusCode !== 200) return callback(new Error('Invalid statusCode powering off')); + for (var i = 0; i < 200; i++) { + if (waitForSnapshot(res.body.SNAPSHOTID)) break; + sleep(10); + } + callback(null, res.body.SNAPSHOTID); } diff --git a/package.json b/package.json index b06d2b7cc..46563240a 100644 --- a/package.json +++ b/package.json @@ -32,6 +32,7 @@ 
"lodash": "^3.2.0", "mocha": "^2.1.0", "nock": "^0.59.1", + "sleep": "^3.0.0", "superagent-sync": "^0.2.0", "supererror": "^0.7.0", "yesno": "0.0.1" From 3b2683463de0067ff6670db13f56f03e6cc02f3e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 13:35:05 -0800 Subject: [PATCH 193/234] localize transfer logic for DO --- images/createImage | 18 ++---------------- images/digitalocean.sh | 27 +++++++++++++++++++++------ 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/images/createImage b/images/createImage index 9d0fbf914..baa4989ae 100755 --- a/images/createImage +++ b/images/createImage @@ -189,21 +189,7 @@ else echo "Skipping server destroy" fi -echo "Transferring image to other regions" -xfer_events=() -image_regions=(sfo1 ams3) # remove this -# skip the first region, as the image was created there -for image_region in ${image_regions[@]:1}; do - xfer_event=$($vps transfer_image ${image_id} ${image_region}) - echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}" - xfer_events+=("${xfer_event}") - sleep 1 -done - -echo "Image transfer initiated, but they will take some time to get transferred." - -for xfer_event in ${xfer_events[@]}; do - $vps wait_for_image_event "${image_id}" "${xfer_event}" -done +echo "Transferring image ${image_id} to other regions" +$vps transer_image_to_all_regions "${image_id}" echo "Done." diff --git a/images/digitalocean.sh b/images/digitalocean.sh index 3d54e1208..f770d7466 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -173,6 +173,25 @@ function wait_for_image_event() { debug "" } +function transfer_image_to_all_regions() { + local image_id="$1" + + xfer_events=() + image_regions=(ams3) ## sfo1 is where the image is created + for image_region in ${image_regions[@]}; do + xfer_event=$(transfer_image ${image_id} ${image_region}) + echo "Image transfer to ${image_region} initiated. 
Event id: ${xfer_event}" + xfer_events+=("${xfer_event}") + sleep 1 + done + + echo "Image transfer initiated, but they will take some time to get transferred." + + for xfer_event in ${xfer_events[@]}; do + $vps wait_for_image_event "${image_id}" "${xfer_event}" + done +} + if [[ $# -lt 1 ]]; then debug " " exit 1 @@ -211,12 +230,8 @@ destroy) destroy_droplet "${@:2}" ;; -wait_for_image_event) - wait_for_image_event "${@:2}" - ;; - -transfer_image) - transfer_image "${@:2}" +transfer_image_to_all_regions) + transfer_image_to_all_regions "${@:2}" ;; *) From d136b2065f77ba79c8529541d18821d23e896ac8 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 13:35:36 -0800 Subject: [PATCH 194/234] ignore vultr transfer image call --- images/vultr.js | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/images/vultr.js b/images/vultr.js index 79d2d8291..992c3b7bb 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -185,11 +185,8 @@ case 'destroy': destroy(process.argv[3], exit); break; -case 'wait_for_image_event': - -case 'transfer_image': - exit(new Error('Unimplemented command:' + process.argv[2])); - +case 'transfer_image_to_all_regions': + // nothing to do? 
break; default: From bc7b8aadc4a8ac874ae16486d30da68166c9fc3c Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 13:39:02 -0800 Subject: [PATCH 195/234] vultr: fix waitForSnapshot call --- images/vultr.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/vultr.js b/images/vultr.js index 992c3b7bb..8aa47bfc1 100755 --- a/images/vultr.js +++ b/images/vultr.js @@ -107,7 +107,7 @@ function powerOff(id, callback) { } function waitForSnapshot(id) { - var res = request.post('https://api.vultr.com/v1/snapshot/list') + var res = request.get('https://api.vultr.com/v1/snapshot/list') .query({ api_key : gApiToken }) .end(); From 606885b23c5d39d1fd6488309d39e43dfa6455a0 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Mon, 23 Nov 2015 13:51:14 -0800 Subject: [PATCH 196/234] fix typo --- images/createImage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createImage b/images/createImage index baa4989ae..398ded5b3 100755 --- a/images/createImage +++ b/images/createImage @@ -190,6 +190,6 @@ else fi echo "Transferring image ${image_id} to other regions" -$vps transer_image_to_all_regions "${image_id}" +$vps transfer_image_to_all_regions "${image_id}" echo "Done." From b389d30728b252b6d421917557e330d2e9141458 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Sat, 12 Dec 2015 17:36:03 -0800 Subject: [PATCH 197/234] max-time is per retry. 
it cannot take more than 3 mins to download the tarball --- src/scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/scripts/installer.sh b/src/scripts/installer.sh index 26286b8f4..8f4aa136d 100755 --- a/src/scripts/installer.sh +++ b/src/scripts/installer.sh @@ -7,7 +7,7 @@ readonly DATA_DIR=/home/yellowtent/data readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly json="${script_dir}/../../node_modules/.bin/json" -readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400" +readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 180" readonly is_update=$([[ -d "${BOX_SRC_DIR}" ]] && echo "yes" || echo "no") From 9dad7ff563632da3d2e65e1b0b8f50ebcc9b1194 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Dec 2015 14:43:01 -0800 Subject: [PATCH 198/234] Fix sed --- images/initializeBaseUbuntuImage.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 7596e423a..a735f7f9b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -243,8 +243,8 @@ EOF systemctl enable box-setup # Configure systemd -sed -e "s/^#SystemMaxUse=/SystemMaxUse=100M/" \ - -e "s/^#ForwardToSyslog=/ForwardToSyslog=no/" \ +sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \ + -e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \ -i /etc/systemd/journald.conf sync From b533d325a48ee4d3add7bc1504fdbfec7f362e17 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Dec 2015 14:48:49 -0800 Subject: [PATCH 199/234] Creating containers fails sporadically HTTP code is 500 which indicates error: server error - Cannot start container redis-9d0ae0eb-a08f-4d0d-a980-ac6fa15d1a3d: [8] System error: write 
/sys/fs/cgroup/memory/system.slice/docker-fa6d6f3fce88f15844710e6ce4a8ac4d3a42e329437501416991b4c55ea3d078.scope/memory.memsw.limit_in_bytes: invalid argument https://github.com/docker/docker/issues/16256 https://github.com/docker/docker/pull/17704 https://github.com/docker/docker/issues/17653 --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a735f7f9b..e34224009 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -92,7 +92,7 @@ After=network.target docker.socket Requires=docker.socket [Service] -ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald +ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 From 72cc318607a7c88b1bc6fb3a1818f4f8a6fa57dc Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 15 Dec 2015 17:17:26 -0800 Subject: [PATCH 200/234] install docker 1.9.1 We hit this error: https://github.com/docker/docker/issues/18283 https://github.com/docker/docker/issues/17083 --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e34224009..e5e5d0b5b 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -68,7 +68,7 @@ apt-get -y install btrfs-tools echo "==== Install docker ====" # install docker from binary to pin it to a specific version. 
the current debian repo does not allow pinning -curl https://get.docker.com/builds/Linux/x86_64/docker-1.9.0 > /usr/bin/docker +curl https://get.docker.com/builds/Linux/x86_64/docker-1.9.1 > /usr/bin/docker chmod +x /usr/bin/docker groupadd docker cat > /etc/systemd/system/docker.socket < Date: Thu, 26 Nov 2015 10:15:21 +0100 Subject: [PATCH 201/234] Use multipart upload for s3 by reducing the chunk size This avoids file upload issues for larger files --- images/createBoxTarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createBoxTarball b/images/createBoxTarball index f50aa5f89..e15237809 100755 --- a/images/createBoxTarball +++ b/images/createBoxTarball @@ -99,7 +99,7 @@ rm -rf "${bundle_dir}" if [[ "${upload}" == "yes" ]]; then echo "Uploading bundle to S3" # That special header is needed to allow access with singed urls created with different aws credentials than the ones the file got uploaded - s3cmd --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" + s3cmd --multipart-chunk-size-mb=5 --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz" echo "The URL for the versions file is: ${versions_file_url}" From 20ac2ff6e75cd5ab228b57d7ccd0b91dcfa5ed7c Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 19 Dec 2015 21:56:00 +0100 Subject: [PATCH 202/234] Do not move ssh port in selfhosting case --- images/initializeBaseUbuntuImage.sh | 37 +++++++++++++++++++---------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e5e5d0b5b..7f71d9cf2 100755 
--- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -6,12 +6,19 @@ readonly USER=yellowtent readonly USER_HOME="/home/${USER}" readonly INSTALLER_SOURCE_DIR="${USER_HOME}/installer" readonly INSTALLER_REVISION="$1" +readonly SELFHOSTED=$(( $# > 1 ? 1 : 0 )) readonly USER_DATA_FILE="/root/user_data.img" readonly USER_DATA_DIR="/home/yellowtent/data" readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${SOURCE_DIR}/INFRA_VERSION" +if [ ${SELFHOSTED} == 0 ]; then + echo "!! Initializing Ubuntu image for CaaS" +else + echo "!! Initializing Ubuntu image for Selfhosting" +fi + echo "==== Create User ${USER} ====" if ! id "${USER}"; then useradd "${USER}" -m @@ -40,7 +47,11 @@ iptables -P OUTPUT ACCEPT # NOTE: keep these in sync with src/apps.js validatePortBindings # allow ssh, http, https, ping, dns iptables -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT -iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,202,443,886 -j ACCEPT +if [ ${SELFHOSTED} == 0 ]; then + iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,202,443,886 -j ACCEPT +else + iptables -A INPUT -p tcp -m tcp -m multiport --dports 80,22,443,886 -j ACCEPT +fi iptables -A INPUT -p icmp --icmp-type echo-request -j ACCEPT iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT iptables -A INPUT -p udp --sport 53 -j ACCEPT @@ -182,7 +193,7 @@ mkdir -p /usr/local/node-4.1.1 curl -sL https://nodejs.org/dist/v4.1.1/node-v4.1.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-4.1.1 ln -s /usr/local/node-4.1.1/bin/node /usr/bin/node ln -s /usr/local/node-4.1.1/bin/npm /usr/bin/npm -apt-get install -y python # Install python which is required for npm rebuild +apt-get install -y python # Install python which is required for npm rebuild echo "=== Rebuilding npm packages ===" cd "${INSTALLER_SOURCE_DIR}" && npm install --production @@ -262,14 +273,16 @@ chown root:systemd-journal /var/log/journal systemctl restart 
systemd-journald setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal -echo "==== Install ssh ===" -apt-get -y install openssh-server -# https://stackoverflow.com/questions/4348166/using-with-sed on why ? must be escaped -sed -e 's/^#\?Port .*/Port 202/g' \ - -e 's/^#\?PermitRootLogin .*/PermitRootLogin without-password/g' \ - -e 's/^#\?PermitEmptyPasswords .*/PermitEmptyPasswords no/g' \ - -e 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/g' \ - -i /etc/ssh/sshd_config +if [ ${SELFHOSTED} == 0 ]; then + echo "==== Install ssh ===" + apt-get -y install openssh-server + # https://stackoverflow.com/questions/4348166/using-with-sed on why ? must be escaped + sed -e 's/^#\?Port .*/Port 202/g' \ + -e 's/^#\?PermitRootLogin .*/PermitRootLogin without-password/g' \ + -e 's/^#\?PermitEmptyPasswords .*/PermitEmptyPasswords no/g' \ + -e 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/g' \ + -i /etc/ssh/sshd_config - # required so we can connect to this machine since port 22 is blocked by iptables by now -systemctl reload sshd + # required so we can connect to this machine since port 22 is blocked by iptables by now + systemctl reload sshd +fi From a2d0ac7ee30abf7a8fed5bd5af7070dd290ad51b Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 20 Dec 2015 10:22:55 +0100 Subject: [PATCH 203/234] Run installer with selfhost flag --- images/initializeBaseUbuntuImage.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 7f71d9cf2..6454403ba 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -200,6 +200,12 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" +if [ ${SELFHOSTED} == 1]; then + $selfhostEnv="SELFHOSTED=true" +else + $selfhostEnv="" +fi + cat > 
/etc/systemd/system/cloudron-installer.service < Date: Sun, 20 Dec 2015 10:23:25 +0100 Subject: [PATCH 204/234] Decide if we run in caas or selfhost mode and fetch user data accordingly --- src/server.js | 42 +++++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/src/server.js b/src/server.js index c098b2171..c150e7056 100755 --- a/src/server.js +++ b/src/server.js @@ -25,10 +25,12 @@ exports = module.exports = { stop: stop }; +var PROVISION_CONFIG_FILE = '/root/provision.json'; + var gHttpsServer = null, // provision server; used for install/restore gHttpServer = null; // update server; used for updates -function provision(callback) { +function provisionCaaS(callback) { if (fs.existsSync('/home/yellowtent/configs/cloudron.conf')) return callback(null); // already provisioned superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { @@ -43,6 +45,19 @@ function provision(callback) { }); } +function provisionSelfhost(callback) { + if (fs.existsSync('/home/yellowtent/configs/cloudron.conf')) return callback(null); // already provisioned + + if (!fs.existsSync(PROVISION_CONFIG_FILE)) { + console.error('No provisioning data found at %s', PROVISION_CONFIG_FILE); + return; + } + + var userData = require(PROVISION_CONFIG_FILE); + + installer.provision(userData, callback); +} + function update(req, res, next) { assert.strictEqual(typeof req.body, 'object'); @@ -159,13 +174,26 @@ function stopUpdateServer(callback) { function start(callback) { assert.strictEqual(typeof callback, 'function'); - debug('starting'); + var actions; - async.series([ - startUpdateServer, - startProvisionServer, - provision - ], callback); + if (process.env.SELFHOSTED) { + debug('Starting Installer in selfhost mode'); + + actions = [ + startUpdateServer, + provisionSelfhost + ]; + } else { + debug('Starting Installer in managed mode'); + + actions = [ + startUpdateServer, + startProvisionServer, + provisionCaaS + ]; + } 
+ + async.series(actions, callback); } function stop(callback) { From 3e2ce9e94cca9a9167582ec32cc93aec083dcf19 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 20 Dec 2015 10:25:12 +0100 Subject: [PATCH 205/234] make cloudron.conf file path a 'const' --- src/server.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/server.js b/src/server.js index c150e7056..af0b409f0 100755 --- a/src/server.js +++ b/src/server.js @@ -26,12 +26,13 @@ exports = module.exports = { }; var PROVISION_CONFIG_FILE = '/root/provision.json'; +var CLOUDRON_CONFIG_FILE = '/home/yellowtent/configs/cloudron.conf'; var gHttpsServer = null, // provision server; used for install/restore gHttpServer = null; // update server; used for updates function provisionCaaS(callback) { - if (fs.existsSync('/home/yellowtent/configs/cloudron.conf')) return callback(null); // already provisioned + if (fs.existsSync(CLOUDRON_CONFIG_FILE)) return callback(null); // already provisioned superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { if (error || result.statusCode !== 200) { @@ -46,7 +47,7 @@ function provisionCaaS(callback) { } function provisionSelfhost(callback) { - if (fs.existsSync('/home/yellowtent/configs/cloudron.conf')) return callback(null); // already provisioned + if (fs.existsSync(CLOUDRON_CONFIG_FILE)) return callback(null); // already provisioned if (!fs.existsSync(PROVISION_CONFIG_FILE)) { console.error('No provisioning data found at %s', PROVISION_CONFIG_FILE); From ce48a2fc12f5d8794fc769fad923da76349fad66 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sun, 20 Dec 2015 11:48:37 +0100 Subject: [PATCH 206/234] Some small fixes for selfhost --- images/initializeBaseUbuntuImage.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 6454403ba..872767677 100755 --- a/images/initializeBaseUbuntuImage.sh +++ 
b/images/initializeBaseUbuntuImage.sh @@ -200,10 +200,9 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" +selfhostEnv="" if [ ${SELFHOSTED} == 1]; then - $selfhostEnv="SELFHOSTED=true" -else - $selfhostEnv="" + selfhostEnv="SELFHOSTED=true" fi cat > /etc/systemd/system/cloudron-installer.service < Date: Sun, 20 Dec 2015 14:58:05 +0100 Subject: [PATCH 207/234] Fix typo --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 872767677..e0891ad98 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -201,7 +201,7 @@ chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" selfhostEnv="" -if [ ${SELFHOSTED} == 1]; then +if [ ${SELFHOSTED} == 1 ]; then selfhostEnv="SELFHOSTED=true" fi From 87e0876ccea200003865db5873d4132de45fa1b3 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 23 Dec 2015 13:27:33 +0100 Subject: [PATCH 208/234] Only pull infra images if we have an INFRA_VERSION file --- images/initializeBaseUbuntuImage.sh | 40 ++++++++++++++++++----------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e0891ad98..f43a4a0cf 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -11,7 +11,12 @@ readonly USER_DATA_FILE="/root/user_data.img" readonly USER_DATA_DIR="/home/yellowtent/data" readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -source "${SOURCE_DIR}/INFRA_VERSION" + +if [ -f "${SOURCE_DIR}/INFRA_VERSION" ]; then + source "${SOURCE_DIR}/INFRA_VERSION" +else + echo "No INFRA_VERSION found, skip pulling docker images" +fi if [ ${SELFHOSTED} == 0 ]; then echo "!! 
Initializing Ubuntu image for CaaS" @@ -137,26 +142,31 @@ update-grub # now add the user to the docker group usermod "${USER}" -a -G docker -echo "=== Pulling base docker images ===" -docker pull "${BASE_IMAGE}" -echo "=== Pulling mysql addon image ===" -docker pull "${MYSQL_IMAGE}" +if [ -z $(echo "${INFRA_VERSION}") ]; then + echo "Skip pulling base docker images" +else + echo "=== Pulling base docker images ===" + docker pull "${BASE_IMAGE}" -echo "=== Pulling postgresql addon image ===" -docker pull "${POSTGRESQL_IMAGE}" + echo "=== Pulling mysql addon image ===" + docker pull "${MYSQL_IMAGE}" -echo "=== Pulling redis addon image ===" -docker pull "${REDIS_IMAGE}" + echo "=== Pulling postgresql addon image ===" + docker pull "${POSTGRESQL_IMAGE}" -echo "=== Pulling mongodb addon image ===" -docker pull "${MONGODB_IMAGE}" + echo "=== Pulling redis addon image ===" + docker pull "${REDIS_IMAGE}" -echo "=== Pulling graphite docker images ===" -docker pull "${GRAPHITE_IMAGE}" + echo "=== Pulling mongodb addon image ===" + docker pull "${MONGODB_IMAGE}" -echo "=== Pulling mail relay ===" -docker pull "${MAIL_IMAGE}" + echo "=== Pulling graphite docker images ===" + docker pull "${GRAPHITE_IMAGE}" + + echo "=== Pulling mail relay ===" + docker pull "${MAIL_IMAGE}" +fi echo "==== Install nginx ====" apt-get -y install nginx-full From b4ba001617eb56858dfa1ca9fd2784ddc5e1ae8e Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Thu, 26 Nov 2015 10:15:21 +0100 Subject: [PATCH 209/234] Use multipart upload for s3 by reducing the chunk size This avoids file upload issues for larger files --- images/createBoxTarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createBoxTarball b/images/createBoxTarball index f50aa5f89..e15237809 100755 --- a/images/createBoxTarball +++ b/images/createBoxTarball @@ -99,7 +99,7 @@ rm -rf "${bundle_dir}" if [[ "${upload}" == "yes" ]]; then echo "Uploading bundle to S3" # That special header is needed to allow 
access with singed urls created with different aws credentials than the ones the file got uploaded - s3cmd --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" + s3cmd --multipart-chunk-size-mb=5 --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz" echo "The URL for the versions file is: ${versions_file_url}" From 49710618ff9616954c4e4cf80d6c880e0cdceef9 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 23 Dec 2015 13:28:11 +0100 Subject: [PATCH 210/234] Docker does not have a -d option anymore This was depricated in 1.8 and is now gone https://github.com/docker/docker/blob/master/CHANGELOG.md#cli --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index e5e5d0b5b..8b6de6574 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -92,7 +92,7 @@ After=network.target docker.socket Requires=docker.socket [Service] -ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs +ExecStart=/usr/bin/docker daemon -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 From 08ba6ac831ead3cb3c69802d289e5fcdfe4e9785 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 23 Dec 2015 13:28:11 +0100 Subject: [PATCH 211/234] Docker does not have a -d option anymore This was depricated in 1.8 and is now gone 
https://github.com/docker/docker/blob/master/CHANGELOG.md#cli --- images/initializeBaseUbuntuImage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index f43a4a0cf..a08840f58 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -108,7 +108,7 @@ After=network.target docker.socket Requires=docker.socket [Service] -ExecStart=/usr/bin/docker -d -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs +ExecStart=/usr/bin/docker daemon -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 From 3eb1fe5e4b77d5b5f6c5f3fdab51d5309b84d22d Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 23 Dec 2015 13:58:35 +0100 Subject: [PATCH 212/234] Ensure we reload the systemd daemon to pickup the new service files --- images/initializeBaseUbuntuImage.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index a08840f58..5fd11543e 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -126,6 +126,7 @@ mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" mkdir -p "${USER_DATA_DIR}/docker" # give docker sometime to start up and create iptables rules +systemctl daemon-reload systemctl enable docker systemctl start docker sleep 10 @@ -229,7 +230,6 @@ Restart=on-failure [Install] WantedBy=multi-user.target EOF -systemctl enable cloudron-installer # Restore iptables before docker echo "==== Install iptables-restore systemd script ====" @@ -246,7 +246,6 @@ RemainAfterExit=yes [Install] WantedBy=multi-user.target EOF -systemctl enable iptables-restore # Allocate swap files # https://bbs.archlinux.org/viewtopic.php?id=194792 ensures this runs after 
do-resize.service @@ -266,6 +265,9 @@ RemainAfterExit=yes WantedBy=multi-user.target EOF +systemctl daemon-reload +systemctl enable cloudron-installer +systemctl enable iptables-restore systemctl enable box-setup # Configure systemd From 3ee14879857a017d72762a661009319ef957bd87 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Tue, 29 Dec 2015 17:57:11 +0100 Subject: [PATCH 213/234] We might support more than just caas and selfhosted --- images/initializeBaseUbuntuImage.sh | 6 +++--- src/server.js | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 5fd11543e..a484a8cc1 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -211,9 +211,9 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" -selfhostEnv="" +provisionEnv="digitalocean" if [ ${SELFHOSTED} == 1 ]; then - selfhostEnv="SELFHOSTED=true" + provisionEnv="PROVISION=local" fi cat > /etc/systemd/system/cloudron-installer.service < Date: Thu, 31 Dec 2015 09:37:55 +0100 Subject: [PATCH 214/234] Include the image scripts in the installer tar --- .gitattributes | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index ef7233d0e..fec865aa3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,6 +1,5 @@ # following files are skipped when exporting using git archive /release export-ignore -/images export-ignore /admin export-ignore test export-ignore .gitattributes export-ignore From b4bef441357fb643fbc02027cb90b548c75b2a67 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Fri, 1 Jan 2016 15:38:58 +0100 Subject: [PATCH 215/234] Do not put docker images into te btrfs volume --- images/initializeBaseUbuntuImage.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh 
b/images/initializeBaseUbuntuImage.sh index a484a8cc1..fab507da4 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -108,7 +108,7 @@ After=network.target docker.socket Requires=docker.socket [Service] -ExecStart=/usr/bin/docker daemon -H fd:// -s btrfs -g /home/yellowtent/data/docker --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs +ExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 @@ -123,7 +123,6 @@ fallocate -l "8192m" "${USER_DATA_FILE}" # 8gb start mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" -mkdir -p "${USER_DATA_DIR}/docker" # give docker sometime to start up and create iptables rules systemctl daemon-reload From 32f49d2122ade2bd1867d3b09c153b4ced23955c Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Fri, 1 Jan 2016 16:29:16 +0100 Subject: [PATCH 216/234] Use 10GB for system, since it now includes docker images --- systemd/box-setup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 4cded1cb4..0aa6effbd 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -15,7 +15,8 @@ readonly app_count=$((${physical_memory} / 200)) # estimated app count readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 -readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp +# readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp +readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1 echo "Physical 
memory: ${physical_memory}" From e3253aacdbb110948242620bfe27ed7bb48470bc Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 2 Jan 2016 15:28:49 +0100 Subject: [PATCH 217/234] Add semver --- npm-shrinkwrap.json | 75 ++++++++++++++++++++++++--------------------- package.json | 1 + 2 files changed, 41 insertions(+), 35 deletions(-) diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json index 4149f26fb..c50935343 100644 --- a/npm-shrinkwrap.json +++ b/npm-shrinkwrap.json @@ -409,46 +409,26 @@ "from": "safetydance@0.0.19", "resolved": "https://registry.npmjs.org/safetydance/-/safetydance-0.0.19.tgz" }, + "semver": { + "version": "5.1.0", + "from": "semver@>=5.1.0 <6.0.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.1.0.tgz" + }, "superagent": { "version": "0.21.0", "from": "superagent@>=0.21.0 <0.22.0", "resolved": "https://registry.npmjs.org/superagent/-/superagent-0.21.0.tgz", "dependencies": { - "qs": { - "version": "1.2.0", - "from": "qs@1.2.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz" - }, - "formidable": { - "version": "1.0.14", - "from": "formidable@1.0.14", - "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz" - }, - "mime": { - "version": "1.2.11", - "from": "mime@1.2.11", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz" - }, "component-emitter": { "version": "1.1.2", "from": "component-emitter@1.1.2", "resolved": "http://registry.npmjs.org/component-emitter/-/component-emitter-1.1.2.tgz" }, - "methods": { - "version": "1.0.1", - "from": "methods@1.0.1", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz" - }, "cookiejar": { "version": "2.0.1", "from": "cookiejar@2.0.1", "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.0.1.tgz" }, - "reduce-component": { - "version": "1.0.1", - "from": "reduce-component@1.0.1", - "resolved": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz" - }, "extend": { "version": "1.2.1", "from": 
"extend@>=1.2.1 <1.3.0", @@ -459,6 +439,11 @@ "from": "form-data@0.1.3", "resolved": "http://registry.npmjs.org/form-data/-/form-data-0.1.3.tgz", "dependencies": { + "async": { + "version": "0.9.2", + "from": "async@>=0.9.0 <0.10.0", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz" + }, "combined-stream": { "version": "0.0.7", "from": "combined-stream@>=0.0.4 <0.1.0", @@ -470,14 +455,29 @@ "resolved": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz" } } - }, - "async": { - "version": "0.9.2", - "from": "async@>=0.9.0 <0.10.0", - "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz" } } }, + "formidable": { + "version": "1.0.14", + "from": "formidable@1.0.14", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.0.14.tgz" + }, + "methods": { + "version": "1.0.1", + "from": "methods@1.0.1", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.0.1.tgz" + }, + "mime": { + "version": "1.2.11", + "from": "mime@1.2.11", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz" + }, + "qs": { + "version": "1.2.0", + "from": "qs@1.2.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-1.2.0.tgz" + }, "readable-stream": { "version": "1.0.27-1", "from": "readable-stream@1.0.27-1", @@ -488,6 +488,11 @@ "from": "core-util-is@>=1.0.0 <1.1.0", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" }, + "inherits": { + "version": "2.0.1", + "from": "inherits@>=2.0.1 <2.1.0", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + }, "isarray": { "version": "0.0.1", "from": "isarray@0.0.1", @@ -497,13 +502,13 @@ "version": "0.10.31", "from": "string_decoder@>=0.10.0 <0.11.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" - }, - "inherits": { - "version": "2.0.1", - "from": "inherits@>=2.0.1 <2.1.0", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" } } + }, + "reduce-component": { + "version": 
"1.0.1", + "from": "reduce-component@1.0.1", + "resolved": "http://registry.npmjs.org/reduce-component/-/reduce-component-1.0.1.tgz" } } } diff --git a/package.json b/package.json index 46563240a..e24e953a7 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,7 @@ "morgan": "^1.5.1", "proxy-middleware": "^0.15.0", "safetydance": "0.0.19", + "semver": "^5.1.0", "superagent": "^0.21.0" }, "devDependencies": { From 30e0cb6515d5ef47702897633d9b80120fcb9e01 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 2 Jan 2016 15:29:20 +0100 Subject: [PATCH 218/234] Upload box tarballs with public acl --- images/createBoxTarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/createBoxTarball b/images/createBoxTarball index e15237809..1303475ea 100755 --- a/images/createBoxTarball +++ b/images/createBoxTarball @@ -99,7 +99,7 @@ rm -rf "${bundle_dir}" if [[ "${upload}" == "yes" ]]; then echo "Uploading bundle to S3" # That special header is needed to allow access with singed urls created with different aws credentials than the ones the file got uploaded - s3cmd --multipart-chunk-size-mb=5 --ssl --add-header=x-amz-acl:authenticated-read --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" + s3cmd --multipart-chunk-size-mb=5 --ssl --acl-public --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz" versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz" echo "The URL for the versions file is: ${versions_file_url}" From 640a0b2627c2dd44986beaf66247cc6f682f7bec Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 2 Jan 2016 15:30:01 +0100 Subject: [PATCH 219/234] Try to get the latest box release if no sourceTarballUrl is specified in the provisioning data --- src/installer.js | 50 
+++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/src/installer.js b/src/installer.js index 3d0665aeb..f9d0897de 100644 --- a/src/installer.js +++ b/src/installer.js @@ -6,6 +6,9 @@ var assert = require('assert'), child_process = require('child_process'), debug = require('debug')('installer:installer'), path = require('path'), + safe = require('safetydance'), + semver = require('semver'), + superagent = require('superagent'), util = require('util'); exports = module.exports = { @@ -72,19 +75,52 @@ function retire(args, callback) { spawn('retire', SUDO, pargs, callback); } +function ensureVersion(args, callback) { + assert.strictEqual(typeof args, 'Object'); + assert.strictEqual(typeof callback, 'function'); + + if (args.sourceTarballUrl) return callback(null, args.sourceTarballUrl); + + if (!args.data.boxVersionsUrl) return callback(new Error('No boxVersionsUrl specified')); + + superagent.get(args.data.boxVersionsUrl).end(function (error, result) { + if (error && !error.response) return callback(error); + if (result.statusCode !== 200) return callback(new Error(util.format('Bad status: %s %s', result.statusCode, result.text))); + + var versions = safe.JSON.parse(result.text); + + if (!versions || typeof versions !== 'object') return callback(new Error('versions is not in valid format:' + safe.error)); + + var latestVersion = Object.keys(versions).sort(semver.compare).pop(); + debug('ensureVersion: Latest version is %s etag:%s', latestVersion, result.header['etag']); + + if (!versions[latestVersion]) return callback(new Error('No version available')); + if (!versions[latestVersion].sourceTarballUrl) return callback(new Error('No sourceTarballUrl specified')); + + args.sourceTarballUrl = versions[latestVersion].sourceTarballUrl; + args.data.version = latestVersion; + + callback(null, args); + }); +} + function provision(args, callback) { assert.strictEqual(typeof args, 'object'); assert.strictEqual(typeof 
callback, 'function'); - var pargs = [ INSTALLER_CMD ]; - pargs.push('--sourcetarballurl', args.sourceTarballUrl); - pargs.push('--data', JSON.stringify(args.data)); + ensureVersion(args, function (error, result) { + if (error) return callback(error); - debug('provision: calling with args %j', pargs); + var pargs = [ INSTALLER_CMD ]; + pargs.push('--sourcetarballurl', result.sourceTarballUrl); + pargs.push('--data', JSON.stringify(result.data)); - if (process.env.NODE_ENV === 'test') return callback(null); + debug('provision: calling with args %j', pargs); - // sudo is required for update() - spawn('provision', SUDO, pargs, callback); + if (process.env.NODE_ENV === 'test') return callback(null); + + // sudo is required for update() + spawn('provision', SUDO, pargs, callback); + }); } From 8e3d1422f39e97174db67f6dfad272e4f4e6d7d1 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 2 Jan 2016 15:38:44 +0100 Subject: [PATCH 220/234] Fix typo, linter could do some work ;-) --- src/installer.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/installer.js b/src/installer.js index f9d0897de..bc26dc153 100644 --- a/src/installer.js +++ b/src/installer.js @@ -76,7 +76,7 @@ function retire(args, callback) { } function ensureVersion(args, callback) { - assert.strictEqual(typeof args, 'Object'); + assert.strictEqual(typeof args, 'object'); assert.strictEqual(typeof callback, 'function'); if (args.sourceTarballUrl) return callback(null, args.sourceTarballUrl); From 3626cc239410b36a2cc14a3e0c330f9c2fb2fc87 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Sat, 2 Jan 2016 16:58:27 +0100 Subject: [PATCH 221/234] Skip some code during tests for now --- src/installer.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/installer.js b/src/installer.js index bc26dc153..384fed205 100644 --- a/src/installer.js +++ b/src/installer.js @@ -108,6 +108,8 @@ function provision(args, callback) { assert.strictEqual(typeof args, 'object'); 
assert.strictEqual(typeof callback, 'function'); + if (process.env.NODE_ENV === 'test') return callback(null); + ensureVersion(args, function (error, result) { if (error) return callback(error); @@ -117,8 +119,6 @@ function provision(args, callback) { debug('provision: calling with args %j', pargs); - if (process.env.NODE_ENV === 'test') return callback(null); - // sudo is required for update() spawn('provision', SUDO, pargs, callback); }); From e518976534d7e699bca025dc541afd4edde4f09c Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 4 Jan 2016 15:57:52 +0100 Subject: [PATCH 222/234] Also support /dev/xvda for box-setup.sh which is used in ec2 --- systemd/box-setup.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/systemd/box-setup.sh b/systemd/box-setup.sh index 0aa6effbd..d827e316a 100755 --- a/systemd/box-setup.sh +++ b/systemd/box-setup.sh @@ -8,17 +8,27 @@ readonly BACKUP_SWAP_FILE="/backup.swap" # used when doing app backups readonly USER_DATA_FILE="/root/user_data.img" readonly USER_DATA_DIR="/home/yellowtent/data" +# detect device +if [[ -b "/dev/vda1" ]]; then + disk_device="/dev/vda1" +fi + +if [[ -b "/dev/xvda1" ]]; then + disk_device="/dev/xvda1" +fi + # all sizes are in mb readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }') readonly swap_size="${physical_memory}" readonly app_count=$((${physical_memory} / 200)) # estimated app count -readonly disk_size_gb=$(fdisk -l /dev/vda1 | grep 'Disk /dev/vda1' | awk '{ print $3 }') +readonly disk_size_gb=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ print $3 }') readonly disk_size=$((disk_size_gb * 1024)) readonly backup_swap_size=1024 # readonly system_size=5120 # 5 gigs for system libs, installer, box code and tmp readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1 +echo "Disk device: 
${disk_device}" echo "Physical memory: ${physical_memory}" echo "Estimated app count: ${app_count}" echo "Disk size: ${disk_size}" From 7cba9f50c8de89d6014249863d67aba1d0e9970d Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 4 Jan 2016 19:31:32 +0100 Subject: [PATCH 223/234] Docker startup is fixed with new service file, no need to wait --- images/initializeBaseUbuntuImage.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index fab507da4..9266fb6c4 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -128,7 +128,6 @@ mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" systemctl daemon-reload systemctl enable docker systemctl start docker -sleep 10 # Disable forwarding to metadata route from containers iptables -I FORWARD -d 169.254.169.254 -j DROP From 98ecc2442528a5caa0aa386403b6587a894b6387 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Tue, 5 Jan 2016 15:48:25 +0100 Subject: [PATCH 224/234] Allow metadata access for selfhosters for now --- images/initializeBaseUbuntuImage.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9266fb6c4..9ac41d456 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -66,9 +66,12 @@ iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT # required to accept any connection iptables -A INPUT -i lo -j ACCEPT iptables -A OUTPUT -o lo -j ACCEPT -# disable metadata access to non-root -# modprobe ipt_owner -iptables -A OUTPUT -m owner ! --uid-owner root -d 169.254.169.254 -j DROP +# The ec2 sysinfo backend needs access to that and runs with user yellowtent +if [ ${SELFHOSTED} == 0 ]; then + # disable metadata access to non-root on caas + # modprobe ipt_owner + iptables -A OUTPUT -m owner ! 
--uid-owner root -d 169.254.169.254 -j DROP +fi # prevent DoS # iptables -A INPUT -p tcp --dport 80 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT From fd9dcd065afea6daafccebe32819c2a247d4efd7 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Tue, 5 Jan 2016 21:11:41 +0100 Subject: [PATCH 225/234] Bring back the sleep 10 to wait for docker's iptable rules See comment in code for further details --- images/initializeBaseUbuntuImage.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9ac41d456..9be9a8ba1 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -127,11 +127,14 @@ mkfs.btrfs -L UserHome "${USER_DATA_FILE}" echo "${USER_DATA_FILE} ${USER_DATA_DIR} btrfs loop,nosuid 0 0" >> /etc/fstab mkdir -p "${USER_DATA_DIR}" && mount "${USER_DATA_FILE}" -# give docker sometime to start up and create iptables rules systemctl daemon-reload systemctl enable docker systemctl start docker +# give docker sometime to start up and create iptables rules +# those rules come in after docker has started, and we want to wait for them to be sure iptables-save has all of them +sleep 10 + # Disable forwarding to metadata route from containers iptables -I FORWARD -d 169.254.169.254 -j DROP From 97a1fc62ae07c079838bb01f1935c0a9d350ebb3 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Tue, 5 Jan 2016 21:13:43 +0100 Subject: [PATCH 226/234] This rule is obsolete It should protect the metadata from apps, but that is already covered with the FORWARD dropping rule below --- images/initializeBaseUbuntuImage.sh | 7 ------- 1 file changed, 7 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 9be9a8ba1..bf8a57cc5 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -66,13 +66,6 @@ iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT # required to accept 
any connection iptables -A INPUT -i lo -j ACCEPT iptables -A OUTPUT -o lo -j ACCEPT -# The ec2 sysinfo backend needs access to that and runs with user yellowtent -if [ ${SELFHOSTED} == 0 ]; then - # disable metadata access to non-root on caas - # modprobe ipt_owner - iptables -A OUTPUT -m owner ! --uid-owner root -d 169.254.169.254 -j DROP -fi - # prevent DoS # iptables -A INPUT -p tcp --dport 80 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT From aa0486bc2b62e8c25352319a224a043413d15502 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 5 Jan 2016 13:27:09 -0800 Subject: [PATCH 227/234] use 15.10 as base image --- images/digitalocean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/digitalocean.sh b/images/digitalocean.sh index f770d7466..db8d5cbc1 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -30,7 +30,7 @@ function create_droplet() { local box_name="$2" local image_region="sfo1" - local ubuntu_image_slug="ubuntu-15-04-x64" # id=12658446 + local ubuntu_image_slug="ubuntu-15-10-x64" local box_size="512mb" local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" From e162582045377c5e8d5dcebe92f2791a4e33aee7 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 5 Jan 2016 15:12:58 -0800 Subject: [PATCH 228/234] Add collectd hack --- images/initializeBaseUbuntuImage.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index bf8a57cc5..853281e49 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -182,7 +182,11 @@ echo "==== Install pwgen ====" apt-get -y install pwgen echo "==== Install collectd ===" -apt-get install -y collectd collectd-utils +if ! 
apt-get install -y collectd collectd-utils; then + # FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this + echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html" + sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf +fi update-rc.d -f collectd remove # this simply makes it explicit that we run logrotate via cron. it's already part of base ubuntu From 910be97f5483ce1babb69c71d30aad4f49298b1e Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 5 Jan 2016 16:16:22 -0800 Subject: [PATCH 229/234] return calling callback --- src/server.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.js b/src/server.js index 6822066d3..657c09edd 100755 --- a/src/server.js +++ b/src/server.js @@ -37,7 +37,7 @@ function provisionDigitalOcean(callback) { superagent.get('http://169.254.169.254/metadata/v1.json').end(function (error, result) { if (error || result.statusCode !== 200) { console.error('Error getting metadata', error); - return; + return callback(new Error('Error getting metadata')); } var userData = JSON.parse(result.body.user_data); @@ -51,7 +51,7 @@ function provisionLocal(callback) { if (!fs.existsSync(PROVISION_CONFIG_FILE)) { console.error('No provisioning data found at %s', PROVISION_CONFIG_FILE); - return; + return callback(new Error('No provisioning data found')); } var userData = require(PROVISION_CONFIG_FILE); From fdf1ed829d837ec374f13c71cc4d2513fbec43c4 Mon Sep 17 00:00:00 2001 From: Girish Ramakrishnan Date: Tue, 5 Jan 2016 20:32:09 -0800 Subject: [PATCH 230/234] Revert "use 15.10 as base image" This reverts commit 50807f3046fdf715cb3bf2afc08436f64995f36a. 
15.10 requires more work --- images/digitalocean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/digitalocean.sh b/images/digitalocean.sh index db8d5cbc1..f770d7466 100755 --- a/images/digitalocean.sh +++ b/images/digitalocean.sh @@ -30,7 +30,7 @@ function create_droplet() { local box_name="$2" local image_region="sfo1" - local ubuntu_image_slug="ubuntu-15-10-x64" + local ubuntu_image_slug="ubuntu-15-04-x64" # id=12658446 local box_size="512mb" local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}" From 5914fd9fb7dc52bf66a75c216f93340cd82bc5eb Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 6 Jan 2016 14:52:21 +0100 Subject: [PATCH 231/234] Return the whole object, not just the string --- src/installer.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/installer.js b/src/installer.js index 384fed205..1d7a6cbfd 100644 --- a/src/installer.js +++ b/src/installer.js @@ -79,7 +79,7 @@ function ensureVersion(args, callback) { assert.strictEqual(typeof args, 'object'); assert.strictEqual(typeof callback, 'function'); - if (args.sourceTarballUrl) return callback(null, args.sourceTarballUrl); + if (args.sourceTarballUrl) return callback(null, args); if (!args.data.boxVersionsUrl) return callback(new Error('No boxVersionsUrl specified')); From 464f0fc2313ca94fbe47f101c1f123c13b6dfab1 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Wed, 6 Jan 2016 16:02:42 +0100 Subject: [PATCH 232/234] add some unit tests for ensureVersion --- src/installer.js | 8 +++-- src/test/installer-test.js | 68 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/src/installer.js b/src/installer.js index 1d7a6cbfd..eb6a6ac7f 100644 --- a/src/installer.js +++ b/src/installer.js @@ -15,7 +15,9 @@ exports = module.exports = { InstallerError: InstallerError, 
provision: provision, - retire: retire + retire: retire, + + _ensureVersion: ensureVersion }; var INSTALLER_CMD = path.join(__dirname, 'scripts/installer.sh'), @@ -79,9 +81,9 @@ function ensureVersion(args, callback) { assert.strictEqual(typeof args, 'object'); assert.strictEqual(typeof callback, 'function'); - if (args.sourceTarballUrl) return callback(null, args); + if (!args.data || !args.data.boxVersionsUrl) return callback(new Error('No boxVersionsUrl specified')); - if (!args.data.boxVersionsUrl) return callback(new Error('No boxVersionsUrl specified')); + if (args.sourceTarballUrl) return callback(null, args); superagent.get(args.data.boxVersionsUrl).end(function (error, result) { if (error && !error.response) return callback(error); diff --git a/src/test/installer-test.js b/src/test/installer-test.js index c36cc8aa4..cb7b26731 100644 --- a/src/test/installer-test.js +++ b/src/test/installer-test.js @@ -13,6 +13,7 @@ var expect = require('expect.js'), os = require('os'), request = require('superagent'), server = require('../server.js'), + installer = require('../installer.js'), _ = require('lodash'); var EXTERNAL_SERVER_URL = 'https://localhost:4443'; @@ -147,5 +148,72 @@ describe('Server', function () { }); }); }); + + describe('ensureVersion', function () { + before(function () { + process.env.NODE_ENV = undefined; + }); + + after(function () { + process.env.NODE_ENV = 'test'; + }); + + it ('fails without data', function (done) { + installer._ensureVersion({}, function (error) { + expect(error).to.be.an(Error); + done(); + }); + }); + + it ('fails without boxVersionsUrl', function (done) { + installer._ensureVersion({ data: {}}, function (error) { + expect(error).to.be.an(Error); + done(); + }); + }); + + it ('succeeds with sourceTarballUrl', function (done) { + var data = { + sourceTarballUrl: 'sometarballurl', + data: { + boxVersionsUrl: 'http://foobar/versions.json' + } + }; + + installer._ensureVersion(data, function (error, result) { + 
expect(error).to.equal(null); + expect(result).to.eql(data); + done(); + }); + }); + + it ('succeeds without sourceTarballUrl', function (done) { + var versions = { + '0.1.0': { + sourceTarballUrl: 'sometarballurl1' + }, + '0.2.0': { + sourceTarballUrl: 'sometarballurl2' + } + }; + + var scope = nock('http://foobar') + .get('/versions.json') + .reply(200, JSON.stringify(versions), { 'Content-Type': 'application/json' }); + + var data = { + data: { + boxVersionsUrl: 'http://foobar/versions.json' + } + }; + + installer._ensureVersion(data, function (error, result) { + expect(error).to.equal(null); + expect(result.sourceTarballUrl).to.equal(versions['0.2.0'].sourceTarballUrl); + expect(result.data.boxVersionsUrl).to.equal(data.data.boxVersionsUrl); + done(); + }); + }); + }); }); From b562cd5c73a9b71e6771c31d892a395bb43dc715 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Fri, 8 Jan 2016 13:15:07 +0100 Subject: [PATCH 233/234] Fix typo when specifying the provisionEnv environment var --- images/initializeBaseUbuntuImage.sh | 2 +- src/server.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/initializeBaseUbuntuImage.sh b/images/initializeBaseUbuntuImage.sh index 853281e49..26b2a0026 100755 --- a/images/initializeBaseUbuntuImage.sh +++ b/images/initializeBaseUbuntuImage.sh @@ -212,7 +212,7 @@ cd "${INSTALLER_SOURCE_DIR}" && npm install --production chown "${USER}:${USER}" -R "${INSTALLER_SOURCE_DIR}" echo "==== Install installer systemd script ====" -provisionEnv="digitalocean" +provisionEnv="PROVISION=digitalocean" if [ ${SELFHOSTED} == 1 ]; then provisionEnv="PROVISION=local" fi diff --git a/src/server.js b/src/server.js index 657c09edd..b3628e9c5 100755 --- a/src/server.js +++ b/src/server.js @@ -184,7 +184,7 @@ function start(callback) { startUpdateServer, provisionLocal ]; - } else { // current fallback, should be 'digitalocean' eventually + } else { // current fallback, should be 'digitalocean' eventually, see 
initializeBaseUbuntuImage.sh debug('Starting Installer in managed mode'); actions = [ From 3c0b88a1eedf9c71c3c1ac69ec238f3302b79438 Mon Sep 17 00:00:00 2001 From: Johannes Zellner Date: Mon, 11 Jan 2016 14:42:20 +0100 Subject: [PATCH 234/234] Move to subfolder installer/ --- {images => installer/images}/createBoxTarball | 0 {images => installer/images}/createImage | 0 {images => installer/images}/digitalocean.sh | 0 {images => installer/images}/images | 0 {images => installer/images}/initializeBaseUbuntuImage.sh | 0 {images => installer/images}/vultr.js | 0 npm-shrinkwrap.json => installer/npm-shrinkwrap.json | 0 package.json => installer/package.json | 0 {src => installer/src}/certs/.gitignore | 0 {src => installer/src}/installer.js | 0 {src => installer/src}/scripts/installer.sh | 0 {src => installer/src}/scripts/retire.sh | 0 {src => installer/src}/server.js | 0 {src => installer/src}/test/certs/ca.crt | 0 {src => installer/src}/test/certs/server.crt | 0 {src => installer/src}/test/certs/server.key | 0 {src => installer/src}/test/installer-test.js | 0 {systemd => installer/systemd}/box-setup.sh | 0 18 files changed, 0 insertions(+), 0 deletions(-) rename {images => installer/images}/createBoxTarball (100%) rename {images => installer/images}/createImage (100%) rename {images => installer/images}/digitalocean.sh (100%) rename {images => installer/images}/images (100%) rename {images => installer/images}/initializeBaseUbuntuImage.sh (100%) rename {images => installer/images}/vultr.js (100%) rename npm-shrinkwrap.json => installer/npm-shrinkwrap.json (100%) rename package.json => installer/package.json (100%) rename {src => installer/src}/certs/.gitignore (100%) rename {src => installer/src}/installer.js (100%) rename {src => installer/src}/scripts/installer.sh (100%) rename {src => installer/src}/scripts/retire.sh (100%) rename {src => installer/src}/server.js (100%) rename {src => installer/src}/test/certs/ca.crt (100%) rename {src => 
installer/src}/test/certs/server.crt (100%) rename {src => installer/src}/test/certs/server.key (100%) rename {src => installer/src}/test/installer-test.js (100%) rename {systemd => installer/systemd}/box-setup.sh (100%) diff --git a/images/createBoxTarball b/installer/images/createBoxTarball similarity index 100% rename from images/createBoxTarball rename to installer/images/createBoxTarball diff --git a/images/createImage b/installer/images/createImage similarity index 100% rename from images/createImage rename to installer/images/createImage diff --git a/images/digitalocean.sh b/installer/images/digitalocean.sh similarity index 100% rename from images/digitalocean.sh rename to installer/images/digitalocean.sh diff --git a/images/images b/installer/images/images similarity index 100% rename from images/images rename to installer/images/images diff --git a/images/initializeBaseUbuntuImage.sh b/installer/images/initializeBaseUbuntuImage.sh similarity index 100% rename from images/initializeBaseUbuntuImage.sh rename to installer/images/initializeBaseUbuntuImage.sh diff --git a/images/vultr.js b/installer/images/vultr.js similarity index 100% rename from images/vultr.js rename to installer/images/vultr.js diff --git a/npm-shrinkwrap.json b/installer/npm-shrinkwrap.json similarity index 100% rename from npm-shrinkwrap.json rename to installer/npm-shrinkwrap.json diff --git a/package.json b/installer/package.json similarity index 100% rename from package.json rename to installer/package.json diff --git a/src/certs/.gitignore b/installer/src/certs/.gitignore similarity index 100% rename from src/certs/.gitignore rename to installer/src/certs/.gitignore diff --git a/src/installer.js b/installer/src/installer.js similarity index 100% rename from src/installer.js rename to installer/src/installer.js diff --git a/src/scripts/installer.sh b/installer/src/scripts/installer.sh similarity index 100% rename from src/scripts/installer.sh rename to 
installer/src/scripts/installer.sh diff --git a/src/scripts/retire.sh b/installer/src/scripts/retire.sh similarity index 100% rename from src/scripts/retire.sh rename to installer/src/scripts/retire.sh diff --git a/src/server.js b/installer/src/server.js similarity index 100% rename from src/server.js rename to installer/src/server.js diff --git a/src/test/certs/ca.crt b/installer/src/test/certs/ca.crt similarity index 100% rename from src/test/certs/ca.crt rename to installer/src/test/certs/ca.crt diff --git a/src/test/certs/server.crt b/installer/src/test/certs/server.crt similarity index 100% rename from src/test/certs/server.crt rename to installer/src/test/certs/server.crt diff --git a/src/test/certs/server.key b/installer/src/test/certs/server.key similarity index 100% rename from src/test/certs/server.key rename to installer/src/test/certs/server.key diff --git a/src/test/installer-test.js b/installer/src/test/installer-test.js similarity index 100% rename from src/test/installer-test.js rename to installer/src/test/installer-test.js diff --git a/systemd/box-setup.sh b/installer/systemd/box-setup.sh similarity index 100% rename from systemd/box-setup.sh rename to installer/systemd/box-setup.sh