diff --git a/src/backups.js b/src/backups.js
index 999477a0a..51194fe14 100644
--- a/src/backups.js
+++ b/src/backups.js
@@ -128,6 +128,7 @@ function getBoxBackupCredentials(appBackupIds, callback) {
     api(backupConfig.provider).getBackupCredentials(backupConfig, function (error, result) {
         if (error) return callback(error);

+        result.provider = backupConfig.provider;
         result.id = filename;
         result.s3Url = 's3://' + backupConfig.bucket + '/' + backupConfig.prefix + '/' + filename;
         result.backupKey = backupConfig.key;
@@ -257,8 +258,13 @@ function backupBoxWithAppBackupIds(appBackupIds, callback) {
         debug('backupBoxWithAppBackupIds: %j', result);

-        var args = [ result.s3Url, result.accessKeyId, result.secretAccessKey, result.region, result.backupKey ];
-        if (result.sessionToken) args.push(result.sessionToken);
+        var args;
+        if (result.provider === 'filesystem') {
+            args = [ 'filesystem', '/tmp/backups', result.id, result.backupKey ];
+        } else {
+            args = [ 's3', result.s3Url, result.accessKeyId, result.secretAccessKey, result.region, result.backupKey ];
+            if (result.sessionToken) args.push(result.sessionToken);
+        }

         shell.sudo('backupBox', [ BACKUP_BOX_CMD ].concat(args), function (error) {
             if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));

@@ -324,8 +330,13 @@ function createNewAppBackup(app, manifest, callback) {
         debugApp(app, 'createNewAppBackup: backup url:%s backup config url:%s', result.s3DataUrl, result.s3ConfigUrl);

-        var args = [ app.id, result.s3ConfigUrl, result.s3DataUrl, result.accessKeyId, result.secretAccessKey, result.region, result.backupKey ];
-        if (result.sessionToken) args.push(result.sessionToken);
+        var args;
+        if (result.provider === 'filesystem') {
+            args = [ 'filesystem', app.id, '/tmp/backups', result.id + '.json', result.id, result.backupKey ];
+        } else {
+            args = [ 's3', app.id, result.s3ConfigUrl, result.s3DataUrl, result.accessKeyId, result.secretAccessKey, result.region, result.backupKey ];
+            if (result.sessionToken) args.push(result.sessionToken);
+        }

         async.series([
             addons.backupAddons.bind(null, app, manifest.addons),
diff --git a/src/scripts/backupapp.sh b/src/scripts/backupapp.sh
index d9970a363..4a542e891 100755
--- a/src/scripts/backupapp.sh
+++ b/src/scripts/backupapp.sh
@@ -12,63 +12,95 @@ if [[ $# == 1 && "$1" == "--check" ]]; then
     exit 0
 fi

-if [ $# -lt 7 ]; then
-    echo "Usage: backupapp.sh <appid> <s3 config url> <s3 data url> <access key id> <access key secret> <region> <password> [session token]"
+readonly DATA_DIR="${HOME}/data"
+
+# verify argument count
+if [[ "$1" == "s3" && $# -lt 8 ]]; then
+    echo "Usage: backupapp.sh s3 <appid> <s3 config url> <s3 data url> <access key id> <access key secret> <region> <password> [session token]"
+    echo "Usage: backupapp.sh filesystem <appid> <backup folder> <config filename> <data filename> <password>"
     exit 1
 fi

-readonly DATA_DIR="${HOME}/data"
-
-# env vars used by the awscli
-readonly app_id="$1"
-readonly s3_config_url="$2"
-readonly s3_data_url="$3"
-export AWS_ACCESS_KEY_ID="$4"
-export AWS_SECRET_ACCESS_KEY="$5"
-export AWS_DEFAULT_REGION="$6"
-readonly password="$7"
-
-if [ $# -gt 7 ]; then
-    export AWS_SESSION_TOKEN="$8"
+if [[ "$1" == "filesystem" && $# -lt 6 ]]; then
+    echo "Usage: backupapp.sh filesystem <appid> <backup folder> <config filename> <data filename> <password>"
+    exit 1
 fi
+
+# extract arguments
+if [[ "$1" == "s3" ]]; then
+    # env vars used by the awscli
+    readonly app_id="$2"
+    readonly s3_config_url="$3"
+    readonly s3_data_url="$4"
+    export AWS_ACCESS_KEY_ID="$5"
+    export AWS_SECRET_ACCESS_KEY="$6"
+    export AWS_DEFAULT_REGION="$7"
+    readonly password="$8"
+
+    if [ $# -gt 8 ]; then
+        export AWS_SESSION_TOKEN="$9"
+    fi
+fi
+
+if [[ "$1" == "filesystem" ]]; then
+    readonly app_id="$2"
+    readonly backup_folder="$3"
+    readonly backup_config_fileName="$4"
+    readonly backup_data_fileName="$5"
+    readonly password="$6"
+fi
+
+# perform backup
 readonly now=$(date "+%Y-%m-%dT%H:%M:%S")
 readonly app_data_dir="${DATA_DIR}/${app_id}"
 readonly app_data_snapshot="${DATA_DIR}/snapshots/${app_id}-${now}"

 btrfs subvolume snapshot -r "${app_data_dir}" "${app_data_snapshot}"

-# Upload config.json first because uploading tarball might take a lot of time, leading to token expiry
-for try in `seq 1 5`; do
-    echo "Uploading config.json to ${s3_config_url} (try ${try})"
-    error_log=$(mktemp)
+if [[ "$1" == "s3" ]]; then
+    # Upload config.json first because uploading tarball might take a lot of time, leading to token expiry
+    for try in `seq 1 5`; do
+        echo "Uploading config.json to ${s3_config_url} (try ${try})"
+        error_log=$(mktemp)

-    # use aws instead of curl because curl will always read entire stream memory to set Content-Length
-    # aws will do multipart upload
-    if cat "${app_data_snapshot}/config.json" \
-        | aws s3 cp - "${s3_config_url}" 2>"${error_log}"; then
-        break
+        # use aws instead of curl because curl will always read entire stream memory to set Content-Length
+        # aws will do multipart upload
+        if cat "${app_data_snapshot}/config.json" \
+            | aws s3 cp - "${s3_config_url}" 2>"${error_log}"; then
+            break
+        fi
+        cat "${error_log}" && rm "${error_log}"
+    done
+
+    if [[ ${try} -eq 5 ]]; then
+        echo "Backup failed uploading config.json"
+        btrfs subvolume delete "${app_data_snapshot}"
+        exit 3
     fi
-    cat "${error_log}" && rm "${error_log}"
-done

-if [[ ${try} -eq 5 ]]; then
-    echo "Backup failed uploading config.json"
-    btrfs subvolume delete "${app_data_snapshot}"
-    exit 3
+    for try in `seq 1 5`; do
+        echo "Uploading backup to ${s3_data_url} (try ${try})"
+        error_log=$(mktemp)
+
+        if tar -czf - -C "${app_data_snapshot}" . \
+            | openssl aes-256-cbc -e -pass "pass:${password}" \
+            | aws s3 cp - "${s3_data_url}" 2>"${error_log}"; then
+            break
+        fi
+        cat "${error_log}" && rm "${error_log}"
+    done
 fi

-for try in `seq 1 5`; do
-    echo "Uploading backup to ${s3_data_url} (try ${try})"
-    error_log=$(mktemp)
+if [[ "$1" == "filesystem" ]]; then
+    mkdir -p "${backup_folder}"

-    if tar -czf - -C "${app_data_snapshot}" . \
-        | openssl aes-256-cbc -e -pass "pass:${password}" \
-        | aws s3 cp - "${s3_data_url}" 2>"${error_log}"; then
-        break
-    fi
-    cat "${error_log}" && rm "${error_log}"
-done
+    echo "Storing backup config to ${backup_folder}/${backup_config_fileName}"
+    cat "${app_data_snapshot}/config.json" > "${backup_folder}/${backup_config_fileName}"
+
+    echo "Storing backup data to ${backup_folder}/${backup_data_fileName}"
+    tar -czf - -C "${app_data_snapshot}" . \
+        | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_data_fileName}"
+fi

 btrfs subvolume delete "${app_data_snapshot}"
diff --git a/src/scripts/backupbox.sh b/src/scripts/backupbox.sh
index 7c59bd037..d570a6d10 100755
--- a/src/scripts/backupbox.sh
+++ b/src/scripts/backupbox.sh
@@ -12,22 +12,39 @@ if [[ $# == 1 && "$1" == "--check" ]]; then
     exit 0
 fi

-if [ $# -lt 5 ]; then
-    echo "Usage: backupbox.sh <s3 url> <access key id> <access key secret> <region> <password> [session token]"
+
+# verify argument count
+if [[ "$1" == "s3" && $# -lt 6 ]]; then
+    echo "Usage: backupbox.sh s3 <s3 url> <access key id> <access key secret> <region> <password> [session token]"
     exit 1
 fi

-# env vars used by the awscli
-s3_url="$1"
-export AWS_ACCESS_KEY_ID="$2"
-export AWS_SECRET_ACCESS_KEY="$3"
-export AWS_DEFAULT_REGION="$4"
-password="$5"
-
-if [ $# -gt 5 ]; then
-    export AWS_SESSION_TOKEN="$6"
+if [[ "$1" == "filesystem" && $# -lt 4 ]]; then
+    echo "Usage: backupbox.sh filesystem <backup folder> <backup filename> <password>"
+    exit 1
 fi

+# extract arguments
+if [[ "$1" == "s3" ]]; then
+    # env vars used by the awscli
+    readonly s3_url="$2"
+    export AWS_ACCESS_KEY_ID="$3"
+    export AWS_SECRET_ACCESS_KEY="$4"
+    export AWS_DEFAULT_REGION="$5"
+    readonly password="$6"
+
+    if [ $# -gt 6 ]; then
+        export AWS_SESSION_TOKEN="$7"
+    fi
+fi
+
+if [[ "$1" == "filesystem" ]]; then
+    readonly backup_folder="$2"
+    readonly backup_fileName="$3"
+    readonly password="$4"
+fi
+
+# perform backup
 now=$(date "+%Y-%m-%dT%H:%M:%S")
 BOX_DATA_DIR="${HOME}/data/box"
 box_snapshot_dir="${HOME}/data/snapshots/box-${now}"
@@ -38,19 +55,29 @@ mysqldump -u root -ppassword --single-transaction --routines --triggers box > "${BOX_DATA_DIR}/box.mysqldump"
 echo "Snapshoting backup as backup-${now}"
 btrfs subvolume snapshot -r "${BOX_DATA_DIR}" "${box_snapshot_dir}"

-for try in `seq 1 5`; do
-    echo "Uploading backup to ${s3_url} (try ${try})"
-    error_log=$(mktemp)
+if [[ "$1" == "s3" ]]; then
+    for try in `seq 1 5`; do
+        echo "Uploading backup to ${s3_url} (try ${try})"
+        error_log=$(mktemp)

-    # use aws instead of curl because curl will always read entire stream memory to set Content-Length
-    # aws will do multipart upload
-    if tar -czf - -C "${box_snapshot_dir}" . \
-        | openssl aes-256-cbc -e -pass "pass:${password}" \
-        | aws s3 cp - "${s3_url}" 2>"${error_log}"; then
-        break
-    fi
-    cat "${error_log}" && rm "${error_log}"
-done
+        # use aws instead of curl because curl will always read entire stream memory to set Content-Length
+        # aws will do multipart upload
+        if tar -czf - -C "${box_snapshot_dir}" . \
+            | openssl aes-256-cbc -e -pass "pass:${password}" \
+            | aws s3 cp - "${s3_url}" 2>"${error_log}"; then
+            break
+        fi
+        cat "${error_log}" && rm "${error_log}"
+    done
+fi
+
+if [[ "$1" == "filesystem" ]]; then
+    echo "Storing backup to ${backup_folder}/${backup_fileName}"
+
+    mkdir -p "${backup_folder}"
+
+    tar -czf - -C "${box_snapshot_dir}" . | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
+fi

 echo "Deleting backup snapshot"
 btrfs subvolume delete "${box_snapshot_dir}"
@@ -61,4 +88,3 @@ if [[ ${try} -eq 5 ]]; then
 else
     echo "Backup successful"
 fi
-
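
Example invocations of the updated scripts (all values below are illustrative placeholders, not taken from the patch; both scripts are normally invoked via shell.sudo from backups.js):

    # box backup to S3: provider marker first, then the existing argument list
    backupbox.sh s3 "s3://my-bucket/backups/backup_2013-11-29T07:59:39" "${ACCESS_KEY_ID}" "${SECRET_ACCESS_KEY}" us-east-1 "${BACKUP_KEY}"

    # box backup to the local filesystem: target folder, target filename, encryption password
    backupbox.sh filesystem /tmp/backups backup_2013-11-29T07:59:39 "${BACKUP_KEY}"

    # app backup to the local filesystem: the appid is needed to locate ${DATA_DIR}/<appid>
    backupapp.sh filesystem "${APP_ID}" /tmp/backups "appbackup_${APP_ID}.json" "appbackup_${APP_ID}" "${BACKUP_KEY}"

A filesystem backup produced this way can be restored with the matching openssl/tar inverse, for example:

    # decrypt with the same password, then unpack the tarball
    openssl aes-256-cbc -d -pass "pass:${BACKUP_KEY}" < /tmp/backups/backup_2013-11-29T07:59:39 | tar -xzf - -C /some/restore/dir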