backuptarget: add default at provisioning time (again)

We clear the DB in the tests, and the tests run the setup again and again,
so it's easier to add the default values at provision time instead of in a
migration.
This commit is contained in:
Girish Ramakrishnan
2025-08-06 10:51:46 +02:00
parent 387c399078
commit f8d74bbb6d
5 changed files with 75 additions and 54 deletions

View File

@@ -26,66 +26,66 @@ exports.up = async function (db) {
const results = await db.runSql('SELECT name, value FROM settings WHERE name=? OR name=? OR name=?', [ 'backup_storage', 'backup_limits', 'backup_policy' ]);
const domainCountResults = await db.runSql('SELECT COUNT(*) AS total FROM domains');
if (domainCountResults[0].total === 0) {
console.log('This cloudron is not activated. Deleting the default backup config from 20171205124434-settings-default-backupConfig.js'); // will be added at provision time
await db.runSql('DELETE FROM settings WHERE name=? OR name=? OR name=?', [ 'backup_storage', 'backup_limits', 'backup_policy']);
return;
}
const name = 'Default', main = true;
let config = null, limits = null, encryption = null, format = null, provider = null;
let retention = { keepWithinSecs: 2 * 24 * 60 * 60 };
let schedule = '00 00 23 * * *';
const id = `bc-${crypto.randomUUID()}`;
if (results.length === 0) {
provider = 'filesystem';
config = { _provider: provider, backupDir: paths.DEFAULT_BACKUP_DIR };
format = 'tgz';
} else {
for (const r of results) {
if (r.name === 'backup_storage') {
const tmp = JSON.parse(r.value);
// provider is top level
provider = tmp.provider;
// convert existing configuration into a backup target
for (const r of results) {
if (r.name === 'backup_storage') {
const tmp = JSON.parse(r.value);
// provider is top level
provider = tmp.provider;
// the s3 and filesystem backend use the _provider internal property
if (provider !== 'gcs' && provider !== 'noop') tmp._provider = tmp.provider;
delete tmp.provider;
// the s3 and filesystem backend use the _provider internal property
if (provider !== 'gcs' && provider !== 'noop') tmp._provider = tmp.provider;
delete tmp.provider;
// backupFolder is now backupDir
if ('backupFolder' in tmp) {
tmp.backupDir = tmp.backupFolder;
delete tmp.backupFolder;
}
// encryption is not part of config anymore, it is top level
encryption = tmp.encryption || null;
delete tmp.encryption;
// format is not part of config anymore, it is top level
format = tmp.format;
delete tmp.format;
// previous releases only had a single "managed" mount at /mnt/cloudronbackup .
// new release has it under /mnt/managedbackups .
if (tmp.mountOptions) tmp._managedMountPath = '/mnt/cloudronbackup';
config = tmp;
} else if (r.name === 'backup_limits') {
limits = JSON.parse(r.value);
} else if (r.name === 'backup_policy') {
const tmp = JSON.parse(r.value);
retention = tmp.retention;
schedule = tmp.schedule;
// backupFolder is now backupDir
if ('backupFolder' in tmp) {
tmp.backupDir = tmp.backupFolder;
delete tmp.backupFolder;
}
// encryption is not part of config anymore, it is top level
encryption = tmp.encryption || null;
delete tmp.encryption;
// format is not part of config anymore, it is top level
format = tmp.format;
delete tmp.format;
// previous releases only had a single "managed" mount at /mnt/cloudronbackup .
// new release has it under /mnt/managedbackups .
if (tmp.mountOptions) tmp._managedMountPath = '/mnt/cloudronbackup';
config = tmp;
} else if (r.name === 'backup_limits') {
limits = JSON.parse(r.value);
} else if (r.name === 'backup_policy') {
const tmp = JSON.parse(r.value);
retention = tmp.retention;
schedule = tmp.schedule;
}
}
await db.runSql('START TRANSACTION');
const targetInfoDir = path.join(paths.BACKUP_INFO_DIR, id);
console.log(`Moving existing cache and snapshot file into ${targetInfoDir}`);
fs.mkdirSync(targetInfoDir, { recursive: true });
child_process.execSync(`find ${paths.BACKUP_INFO_DIR}/ -maxdepth 1 -type f -exec mv -t ${targetInfoDir}/ {} +`);
await db.runSql('START TRANSACTION');
await db.runSql('INSERT INTO backupTargets (id, name, provider, configJson, limitsJson, retentionJson, schedule, encryptionJson, format, main) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[ id, name, provider, JSON.stringify(config), JSON.stringify(limits), JSON.stringify(retention), schedule, JSON.stringify(encryption), format, main ]);
await db.runSql('DELETE FROM settings WHERE name=? OR name=? OR name=?', [ 'backup_storage', 'backup_limits', 'backup_policy' ]);
await db.runSql('COMMIT');
};

View File

@@ -5,9 +5,10 @@ const crypto = require('crypto'),
paths = require('../src/paths.js');
exports.up = async function(db) {
let results = await db.runSql('SELECT format, COUNT(*) AS count FROM backups GROUP BY format WITH ROLLUP', []); // https://dev.mysql.com/doc/refman/8.4/en/group-by-modifiers.html
const backups = await db.runSql('SELECT format, COUNT(*) AS count FROM backups GROUP BY format WITH ROLLUP', []); // https://dev.mysql.com/doc/refman/8.4/en/group-by-modifiers.html
let tgzCount = 0, rsyncCount = 0, totalCount = 0;
for (const r of results) {
for (const r of backups) {
if (r.format === 'tgz') tgzCount = r.count;
else if (r.format === 'rsync') rsyncCount = r.count;
else if (r.format === null) totalCount = r.count;
@@ -17,12 +18,12 @@ exports.up = async function(db) {
else if (rsyncCount === totalCount) theOneFormat = 'rsync';
console.log(`Backup counts. rsync: ${rsyncCount} tgz: ${tgzCount} total: ${totalCount} . theOneFormat: ${theOneFormat}`);
results = await db.runSql('SELECT * FROM backupTargets');
const currentBackupTarget = results[0];
const backupTargets = await db.runSql('SELECT * FROM backupTargets');
const currentBackupTarget = backupTargets[0];
let cloneBackupTarget = null;
if (currentBackupTarget.format !== theOneFormat) {
if (totalCount && currentBackupTarget.format !== theOneFormat) {
const cloneId = `bc-${crypto.randomUUID()}`;
cloneBackupTarget = Object.assign({}, results[0], { id: cloneId });
cloneBackupTarget = Object.assign({}, backupTargets[0], { id: cloneId });
cloneBackupTarget.format = currentBackupTarget.format === 'rsync' ? 'tgz' : 'rsync';
cloneBackupTarget.priority = false;
cloneBackupTarget.name = 'Copy of Default';
@@ -36,13 +37,15 @@ exports.up = async function(db) {
}
await db.runSql('ALTER TABLE backups ADD targetId VARCHAR(128)');
if (currentBackupTarget.format === 'tgz') {
const ext = currentBackupTarget.encryptionJson ? '.tar.gz.enc' : '.tar.gz';
console.log(`Adjusting remotePath of existing tgz backups with ${ext}`);
await db.runSql('UPDATE backups SET remotePath=CONCAT(remotePath, ?) WHERE format=?', [ ext, 'tgz' ]);
if (totalCount) {
if (currentBackupTarget.format === 'tgz') {
const ext = currentBackupTarget.encryptionJson ? '.tar.gz.enc' : '.tar.gz';
console.log(`Adjusting remotePath of existing tgz backups with ${ext}`);
await db.runSql('UPDATE backups SET remotePath=CONCAT(remotePath, ?) WHERE format=?', [ ext, 'tgz' ]);
}
await db.runSql('UPDATE backups SET targetId=? WHERE format=?', [ currentBackupTarget.id, currentBackupTarget.format ]);
if (cloneBackupTarget) await db.runSql('UPDATE backups SET targetId=? WHERE format=?', [ cloneBackupTarget.id, cloneBackupTarget.format ]);
}
await db.runSql('UPDATE backups SET targetId=? WHERE format=?', [ currentBackupTarget.id, currentBackupTarget.format ]);
if (cloneBackupTarget) await db.runSql('UPDATE backups SET targetId=? WHERE format=?', [ cloneBackupTarget.id, cloneBackupTarget.format ]);
await db.runSql('ALTER TABLE backups MODIFY targetId VARCHAR(128) NOT NULL');
await db.runSql('ALTER TABLE backups ADD FOREIGN KEY(targetId) REFERENCES backupTargets(id)');
await db.runSql('ALTER TABLE backups DROP COLUMN format');

View File

@@ -52,7 +52,7 @@ OUT=`docker inspect mysql-server` || true
if [[ "${OUT}" = "[]" ]]; then
echo "=> Starting mysql-server..."
# ulimit nofile is required to make it work on archlinux https://github.com/docker-library/mysql/issues/579#issuecomment-519495808
docker run --name mysql-server --ulimit nofile=262144:262144 -e MYSQL_ROOT_PASSWORD=password -d mysql:8.0
docker run --init --name mysql-server --ulimit nofile=262144:262144 -e MYSQL_ROOT_PASSWORD=password -d mysql:8.0
else
echo "=> mysql-server already running. If you want to start fresh, run 'docker rm --force mysql-server'"
fi

View File

@@ -5,6 +5,7 @@ exports = module.exports = {
getPrimary,
list,
add,
addDefault,
del,
setConfig,
@@ -516,6 +517,22 @@ async function add(data, auditSource) {
return id;
}
// Creates the built-in "Default" backup target (filesystem-backed, tgz format)
// and marks it as the primary target. Invoked at provision time so a fresh
// Cloudron always starts with a usable backup configuration.
async function addDefault(auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    debug('addDefault: adding default backup target');

    const target = {
        name: 'Default',
        provider: 'filesystem',
        config: { backupDir: paths.DEFAULT_BACKUP_DIR },
        retention: { keepWithinSecs: 2 * 24 * 60 * 60 }, // keep backups from the last 2 days
        schedule: '00 00 23 * * *', // 6-field cron expression — presumably daily at 23:00; confirm against scheduler
        format: 'tgz'
    };

    target.id = await add(target, auditSource); // add() persists the target and returns its id
    await setPrimary(target, auditSource);
}
// creates a backup target object that is not in the database
async function createPseudo(data) {
assert.strictEqual(typeof data, 'object');

View File

@@ -86,6 +86,7 @@ async function setupTask(domain, auditSource) {
await reverseProxy.ensureCertificate(location, {}, auditSource);
await ensureDhparams();
await dashboard.setupLocation(constants.DASHBOARD_SUBDOMAIN, domain, auditSource);
await backupTargets.addDefault(auditSource);
setProgress('setup', 'Done'),
await eventlog.add(eventlog.ACTION_PROVISION, auditSource, {});
} catch (error) {