make startBackupTask async
+21 -24
@@ -44,6 +44,7 @@ const assert = require('assert'),
     CronJob = require('cron').CronJob,
     crypto = require('crypto'),
     database = require('./database.js'),
+    debug = require('debug')('box:backups'),
     ejs = require('ejs'),
     eventlog = require('./eventlog.js'),
     fs = require('fs'),
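Assuming the line added in this hunk is indeed the debug require (it backs the new { debug } option passed to safe() further down), the debug package namespaces log output and gates it on the DEBUG environment variable:

// Output only appears when run with e.g. DEBUG=box:backups.
const debug = require('debug')('box:backups');

debug('backup task %s started', 'task-123'); // printf-style formatting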
@@ -53,10 +54,7 @@ const assert = require('assert'),
     safe = require('safetydance'),
     settings = require('./settings.js'),
     storage = require('./storage.js'),
-    tasks = require('./tasks.js'),
-    util = require('util');
-
-const getBackupConfig = util.callbackify(settings.getBackupConfig);
+    tasks = require('./tasks.js');
 
 const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/cloudron-backup.ejs', { encoding: 'utf8' });
 
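The util.callbackify shim existed only so callback-style code could drive the promise-based settings.getBackupConfig; now that startBackupTask is itself async, it awaits the promise directly and both the shim and the util require go away. For reference, Node's built-in util.callbackify adapts a promise-returning function for callback callers (hypothetical names below):

const util = require('util');

// Stand-in for a promise-based getter like settings.getBackupConfig.
async function getConfig() {
    return { schedulePattern: '00 00 23 * * *' };
}

// util.callbackify() returns a (...args, callback) flavored wrapper.
const getConfigCb = util.callbackify(getConfig);

getConfigCb(function (error, config) {
    if (error) return console.error(error);
    console.log(config.schedulePattern);
});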
@@ -177,31 +175,28 @@ async function update(id, backup) {
     if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
 }
 
-function startBackupTask(auditSource, callback) {
+async function startBackupTask(auditSource) {
     let error = locker.lock(locker.OP_FULL_BACKUP);
-    if (error) return callback(new BoxError(BoxError.BAD_STATE, `Cannot backup now: ${error.message}`));
+    if (error) throw new BoxError(BoxError.BAD_STATE, `Cannot backup now: ${error.message}`);
 
-    getBackupConfig(async function (error, backupConfig) {
-        if (error) return callback(error);
+    const backupConfig = await settings.getBackupConfig();
 
-        const memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;
+    const memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;
 
-        const [taskError, taskId] = await safe(tasks.add(tasks.TASK_BACKUP, [ { /* options */ } ]));
-        if (taskError) return callback(taskError);
+    const taskId = await tasks.add(tasks.TASK_BACKUP, [ { /* options */ } ]);
 
-        eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });
+    await eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });
 
-        tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit }, async function (error, backupId) {
-            locker.unlock(locker.OP_FULL_BACKUP);
+    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit }, async function (error, backupId) {
+        locker.unlock(locker.OP_FULL_BACKUP);
 
-            const errorMessage = error ? error.message : '';
-            const timedOut = error ? error.code === tasks.ETIMEOUT : false;
+        const errorMessage = error ? error.message : '';
+        const timedOut = error ? error.code === tasks.ETIMEOUT : false;
 
-            await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }));
-        });
-
-        callback(null, taskId);
+        await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }), { debug });
     });
+
+    return taskId;
 }
 
 async function list(page, perPage) {
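Note that the async version still returns the taskId before the backup finishes: tasks.startTask() keeps its completion callback, which releases the lock and records the finish event later. Errors on that late path are routed through safetydance's safe(). As used in this diff, safe(promise) appears to resolve to an [error, result] tuple instead of rejecting, with an optional { debug } option for logging swallowed errors. A minimal sketch of that assumed contract (consult the safetydance package for the real implementation):

// Sketch: a tuple-style wrapper that never rejects.
async function safe(promise, options = {}) {
    try {
        return [null, await promise];
    } catch (error) {
        if (options.debug) options.debug(error); // e.g. the module's debug() logger
        return [error, null];
    }
}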
@@ -223,12 +218,14 @@ async function del(id) {
 }
 
 function cleanupCacheFilesSync() {
-    var files = safe.fs.readdirSync(path.join(paths.BACKUP_INFO_DIR));
+    const files = safe.fs.readdirSync(path.join(paths.BACKUP_INFO_DIR));
     if (!files) return;
 
-    files.filter(function (f) { return f.endsWith('.sync.cache'); }).forEach(function (f) {
-        safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, f));
-    });
+    files
+        .filter(function (f) { return f.endsWith('.sync.cache'); })
+        .forEach(function (f) {
+            safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, f));
+        });
 }
 
 function getSnapshotInfo(id) {
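The safe.fs namespace mirrors Node's fs but returns null or false instead of throwing, which is why the `if (!files) return;` guard is enough here. A sketch of that wrapping, assuming safetydance's null-on-error behavior (it also records the last error, e.g. on safe.error):

const fs = require('fs');

// Hypothetical equivalent of safe.fs.readdirSync: null on failure, no throw.
function readdirSafeSync(dir) {
    try {
        return fs.readdirSync(dir);
    } catch (error) {
        return null; // caller just checks for null, as cleanupCacheFilesSync() does
    }
}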
+1 -1

@@ -167,7 +167,7 @@ function backupConfigChanged(value, tz) {
 
     gJobs.backup = new CronJob({
         cronTime: value.schedulePattern,
-        onTick: backups.startBackupTask.bind(null, auditSource.CRON, NOOP_CALLBACK),
+        onTick: async () => await safe(backups.startBackupTask(auditSource.CRON), { debug }),
         start: true,
         timeZone: tz
     });
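With the callback gone, NOOP_CALLBACK no longer works as an error sink: the cron package neither awaits nor catches what onTick returns, so a rejecting async tick would surface as an unhandled promise rejection. Wrapping the call in safe() contains scheduled failures. A standalone sketch of the same guard, with a hypothetical schedule and tick body:

const { CronJob } = require('cron');

// Hypothetical tick body standing in for backups.startBackupTask().
async function runBackup() {
    throw new Error('cannot backup now');
}

new CronJob({
    cronTime: '0 0 3 * * *', // hypothetical: daily at 03:00
    onTick: async () => {
        try {
            await runBackup();
        } catch (error) {
            console.error(`scheduled backup failed: ${error.message}`); // never escapes the tick
        }
    },
    start: true,
    timeZone: 'UTC'
});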
@@ -26,12 +26,11 @@ async function list(req, res, next) {
     next(new HttpSuccess(200, { backups: result }));
 }
 
-function startBackup(req, res, next) {
-    backups.startBackupTask(auditSource.fromRequest(req), function (error, taskId) {
-        if (error) return next(BoxError.toHttpError(error));
+async function startBackup(req, res, next) {
+    const [error, taskId] = await safe(backups.startBackupTask(auditSource.fromRequest(req)));
+    if (error) return next(BoxError.toHttpError(error));
 
-        next(new HttpSuccess(202, { taskId }));
-    });
+    next(new HttpSuccess(202, { taskId }));
 }
 
 async function cleanup(req, res, next) {
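The route answers 202 Accepted with the taskId as soon as the task is queued; the backup continues in the background and clients poll the tasks API for progress. A generic Express sketch of this accept-and-poll pattern (hypothetical names, not the actual box route wiring):

const express = require('express');

// Hypothetical stand-in for backups.startBackupTask().
async function startBackupTask() {
    return 'task-123'; // id of the queued background task
}

const app = express();

app.post('/backups', async function (req, res) {
    try {
        const taskId = await startBackupTask();
        res.status(202).json({ taskId }); // accepted: work continues in the background
    } catch (error) {
        res.status(409).json({ message: error.message }); // e.g. a backup is already running
    }
});

app.listen(3000);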
+27 -39

@@ -15,7 +15,6 @@ const backups = require('../backups.js'),
    fs = require('fs'),
    os = require('os'),
    path = require('path'),
-    safe = require('safetydance'),
    settings = require('../settings.js'),
    tasks = require('../tasks.js');
 
@@ -86,66 +85,55 @@ describe('backuptask', function () {
         fs.rmSync(backupConfig.backupFolder, { recursive: true, force: true });
     });
 
-    function createBackup(callback) {
-        backups.startBackupTask({ username: 'test' }, async function (error, taskId) { // this call does not wait for the backup!
-            if (error) return callback(error);
+    async function createBackup() {
+        const taskId = await backups.startBackupTask({ username: 'test' });
 
-            // eslint-disable-next-line no-constant-condition
-            while (true) {
-                await delay(1000);
+        // eslint-disable-next-line no-constant-condition
+        while (true) {
+            await delay(1000);
 
-                const p = await tasks.get(taskId);
+            const p = await tasks.get(taskId);
 
-                if (p.percent !== 100) continue;
-                if (p.error) return callback(new Error(`backup failed: ${p.error.message}`));
-                if (!p.result) return callback(new Error('backup has no result:' + p));
+            if (p.percent !== 100) continue;
+            if (p.error) throw new Error(`backup failed: ${p.error.message}`);
+            if (!p.result) throw new Error('backup has no result:' + p);
 
-                const [error, result] = await safe(backups.getByIdentifierAndStatePaged(backups.BACKUP_IDENTIFIER_BOX, backups.BACKUP_STATE_NORMAL, 1, 1));
+            const result = await backups.getByIdentifierAndStatePaged(backups.BACKUP_IDENTIFIER_BOX, backups.BACKUP_STATE_NORMAL, 1, 1);
 
-                if (error) return callback(error);
-                if (result.length !== 1) return callback(new Error('result is not of length 1'));
+            if (result.length !== 1) throw new Error('result is not of length 1');
 
-                // the task progress and the db entry is set in the worker. wait for 2 seconds for backup lock to get released in parent process
-                await delay(2000);
+            // the task progress and the db entry is set in the worker. wait for 2 seconds for backup lock to get released in parent process
+            await delay(2000);
 
-                callback(null, result[0]);
-            }
-        });
+            return result[0];
+        }
     }
 
-    it('can backup', function (done) {
+    it('can backup', async function () {
         // arch only has maria db which lacks some mysqldump options we need, this is only here to allow running the tests :-/
         if (require('child_process').execSync('/usr/bin/mysqldump --version').toString().indexOf('MariaDB') !== -1) {
             console.log('test skipped because of MariaDB');
-            return done();
+            return;
         }
 
-        createBackup(function (error, result) {
-            expect(error).to.be(null);
-            expect(fs.statSync(path.join(backupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
-            expect(fs.statSync(path.join(backupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2);
+        const result = await createBackup();
+        expect(fs.statSync(path.join(backupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
+        expect(fs.statSync(path.join(backupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2);
 
-            backupInfo1 = result;
-
-            done();
-        });
+        backupInfo1 = result;
     });
 
-    it('can take another backup', function (done) {
+    it('can take another backup', async function () {
         // arch only has maria db which lacks some mysqldump options we need, this is only here to allow running the tests :-/
         if (require('child_process').execSync('/usr/bin/mysqldump --version').toString().indexOf('MariaDB') !== -1) {
             console.log('test skipped because of MariaDB');
-            return done();
+            return;
         }
 
-        createBackup(function (error, result) {
-            expect(error).to.be(null);
-            expect(fs.statSync(path.join(backupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
-            expect(fs.statSync(path.join(backupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2); // hard linked to new backup
-            expect(fs.statSync(path.join(backupConfig.backupFolder, `${backupInfo1.id}.tar.gz`)).nlink).to.be(1); // not hard linked anymore
-
-            done();
-        });
+        const result = await createBackup();
+        expect(fs.statSync(path.join(backupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
+        expect(fs.statSync(path.join(backupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2); // hard linked to new backup
+        expect(fs.statSync(path.join(backupConfig.backupFolder, `${backupInfo1.id}.tar.gz`)).nlink).to.be(1); // not hard linked anymore
    });
 
 });
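createBackup() now polls tasks.get() once a second until the task reports 100 percent. The loop leans on a delay() helper that is not part of this hunk; a typical definition, assuming it is declared near the top of the test file:

// Hypothetical definition of the delay() helper used by the polling loop.
const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));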