diff --git a/src/scripts/backupapp.sh b/src/scripts/backupapp.sh
index b1ee068ea..bca3782c4 100755
--- a/src/scripts/backupapp.sh
+++ b/src/scripts/backupapp.sh
@@ -15,8 +15,8 @@ fi
 readonly DATA_DIR="${HOME}/data"
 
 # verify argument count
-if [[ "$1" == "s3" && $# -lt 8 ]]; then
-    echo "Usage: backupapp.sh s3 [session token]"
+if [[ "$1" == "s3" && $# -lt 9 ]]; then
+    echo "Usage: backupapp.sh s3 <app id> <config url> <data url> <access key id> <secret access key> <region> <endpoint url> <password> [session token]"
     exit 1
 fi
@@ -36,10 +36,11 @@ if [[ "$1" == "s3" ]]; then
     export AWS_ACCESS_KEY_ID="$5"
     export AWS_SECRET_ACCESS_KEY="$6"
     export AWS_DEFAULT_REGION="$7"
-    readonly password="$8"
+    readonly endpoint_url="$8"
+    readonly password="$9"
 
-    if [ $# -gt 8 ]; then
-        export AWS_SESSION_TOKEN="$9"
+    if [ $# -gt 9 ]; then
+        export AWS_SESSION_TOKEN="${10}"
     fi
 fi
@@ -61,6 +62,12 @@ btrfs subvolume snapshot -r "${app_data_dir}" "${app_data_snapshot}"
 try=0
 if [[ "$1" == "s3" ]]; then
+    # may be empty
+    optional_args=""
+    if [ -n "${endpoint_url}" ]; then
+        optional_args="--endpoint-url ${endpoint_url}"
+    fi
+
     # Upload config.json first because uploading tarball might take a lot of time, leading to token expiry
     for try in `seq 1 5`; do
         echo "Uploading config.json to ${s3_config_url} (try ${try})"
@@ -69,7 +76,7 @@ if [[ "$1" == "s3" ]]; then
         # use aws instead of curl because curl will always read entire stream memory to set Content-Length
         # aws will do multipart upload
         if cat "${app_data_snapshot}/config.json" \
-            | aws s3 cp - "${s3_config_url}" 2>"${error_log}"; then
+            | aws ${optional_args} s3 cp - "${s3_config_url}" 2>"${error_log}"; then
             break
         fi
         cat "${error_log}" && rm "${error_log}"
@@ -87,7 +94,7 @@ if [[ "$1" == "s3" ]]; then
         if tar -czf - -C "${app_data_snapshot}" . \
             | openssl aes-256-cbc -e -pass "pass:${password}" \
-            | aws s3 cp - "${s3_data_url}" 2>"${error_log}"; then
+            | aws ${optional_args} s3 cp - "${s3_data_url}" 2>"${error_log}"; then
             break
         fi
         cat "${error_log}" && rm "${error_log}"
diff --git a/src/scripts/backupbox.sh b/src/scripts/backupbox.sh
index f011eb732..06e83027d 100755
--- a/src/scripts/backupbox.sh
+++ b/src/scripts/backupbox.sh
@@ -14,8 +14,8 @@ fi
 
 # verify argument count
-if [[ "$1" == "s3" && $# -lt 6 ]]; then
-    echo "Usage: backupbox.sh s3 [session token]"
+if [[ "$1" == "s3" && $# -lt 7 ]]; then
+    echo "Usage: backupbox.sh s3 <s3 url> <access key id> <secret access key> <region> <endpoint url> <password> [session token]"
     exit 1
 fi
@@ -31,10 +31,11 @@ if [[ "$1" == "s3" ]]; then
     export AWS_ACCESS_KEY_ID="$3"
     export AWS_SECRET_ACCESS_KEY="$4"
     export AWS_DEFAULT_REGION="$5"
-    readonly password="$6"
+    readonly endpoint_url="$6"
+    readonly password="$7"
 
-    if [ $# -gt 6 ]; then
-        export AWS_SESSION_TOKEN="$7"
+    if [ $# -gt 7 ]; then
+        export AWS_SESSION_TOKEN="$8"
     fi
 fi
@@ -63,11 +64,17 @@ if [[ "$1" == "s3" ]]; then
         echo "Uploading backup to ${s3_url} (try ${try})"
         error_log=$(mktemp)
 
+        # may be empty
+        optional_args=""
+        if [ -n "${endpoint_url}" ]; then
+            optional_args="--endpoint-url ${endpoint_url}"
+        fi
+
         # use aws instead of curl because curl will always read entire stream memory to set Content-Length
         # aws will do multipart upload
         if tar -czf - -C "${box_snapshot_dir}" . \
             | openssl aes-256-cbc -e -pass "pass:${password}" \
-            | aws s3 cp - "${s3_url}" 2>"${error_log}"; then
+            | aws ${optional_args} s3 cp - "${s3_url}" 2>"${error_log}"; then
             break
         fi
         cat "${error_log}" && rm "${error_log}"
diff --git a/src/scripts/backuptests3.sh b/src/scripts/backuptests3.sh
index 28917561b..7d8321247 100755
--- a/src/scripts/backuptests3.sh
+++ b/src/scripts/backuptests3.sh
@@ -6,7 +6,14 @@ readonly s3_url="$1"
 export AWS_ACCESS_KEY_ID="$2"
 export AWS_SECRET_ACCESS_KEY="$3"
 export AWS_DEFAULT_REGION="$4"
+readonly endpoint_url="$5"
 
-echo "Test Content" | aws s3 cp - "${s3_url}"
+optional_args=""
 
-aws s3 rm "${s3_url}"
+if [ -n "${endpoint_url}" ]; then
+    optional_args="--endpoint-url ${endpoint_url}"
+fi
+
+echo "Test Content" | aws ${optional_args} s3 cp - "${s3_url}"
+
+aws ${optional_args} s3 rm "${s3_url}"
diff --git a/src/storage/s3.js b/src/storage/s3.js
index a646c75a5..df7837d17 100644
--- a/src/storage/s3.js
+++ b/src/storage/s3.js
@@ -48,7 +48,7 @@ function getBoxBackupDetails(apiConfig, id, callback) {
     var region = apiConfig.region || 'us-east-1';
 
     var details = {
-        backupScriptArguments: [ 's3', s3Url, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.key ]
+        backupScriptArguments: [ 's3', s3Url, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.endpoint || '', apiConfig.key ]
     };
 
     callback(null, details);
@@ -66,7 +66,7 @@ function getAppBackupDetails(apiConfig, appId, dataId, configId, callback) {
     var region = apiConfig.region || 'us-east-1';
 
     var details = {
-        backupScriptArguments: [ 's3', appId, s3ConfigUrl, s3DataUrl, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.key ]
+        backupScriptArguments: [ 's3', appId, s3ConfigUrl, s3DataUrl, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.endpoint || '', apiConfig.key ]
     };
 
     callback(null, details);
@@ -191,7 +191,7 @@ function testConfig(apiConfig, callback) {
     // now perform the same as what we do in the backup shell scripts
     var BACKUP_TEST_CMD = require('path').join(__dirname, '../scripts/backuptests3.sh');
     var tmpUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/testfile';
-    var args = [ tmpUrl, apiConfig.accessKeyId, apiConfig.secretAccessKey, apiConfig.region ];
+    var args = [ tmpUrl, credentials.accessKeyId, credentials.secretAccessKey, credentials.region, credentials.endpoint || '' ];
 
     // if this fails the region is wrong, otherwise we would have failed earlier.
     shell.exec('backupTestS3', BACKUP_TEST_CMD, args, function (error) {