diff --git a/dashboard/src/views/BackupListView.vue b/dashboard/src/views/BackupListView.vue
index a0edcfe54..37659c62f 100644
--- a/dashboard/src/views/BackupListView.vue
+++ b/dashboard/src/views/BackupListView.vue
@@ -6,7 +6,7 @@ const t = i18n.t;
import { ref, onMounted, useTemplateRef } from 'vue';
import { Button, ClipboardAction, Menu, FormGroup, TextInput, Checkbox, TableView, Dialog } from '@cloudron/pankow';
-import { prettyLongDate, prettyFileSize } from '@cloudron/pankow/utils';
+import { prettyLongDate } from '@cloudron/pankow/utils';
import { TASK_TYPES } from '../constants.js';
import Section from '../components/Section.vue';
import BackupsModel from '../models/BackupsModel.js';
@@ -374,7 +374,6 @@ onMounted(async () => {
{{ $t('backups.listing.appCount', { appCount: backup.contents.length }) }}
{{ $t('backups.listing.noApps') }}
- ({{ backup.stats.fileCount }} files - {{ prettyFileSize(backup.stats.size) }})
{{ backup.site.name }}
diff --git a/src/backupformat/rsync.js b/src/backupformat/rsync.js
index f1a6438ed..bac8525dd 100644
--- a/src/backupformat/rsync.js
+++ b/src/backupformat/rsync.js
@@ -43,7 +43,7 @@ async function addFile(sourceFile, encryption, uploader, progressCallback) {
const [openError, sourceHandle] = await safe(fs.promises.open(sourceFile, 'r'));
if (openError) {
debug(`addFile: ignoring disappeared file: ${sourceFile}`);
- return;
+ return null;
}
const sourceStream = sourceHandle.createReadStream(sourceFile, { autoClose: true });
@@ -69,6 +69,7 @@ async function addFile(sourceFile, encryption, uploader, progressCallback) {
// debug(`addFile: pipeline finished: ${JSON.stringify(ps.stats())}`);
await uploader.finish();
+
return {
stats: ps.stats(),
integrity: { size: ps.stats().transferred, sha256: hash.digest('hex') }
@@ -84,11 +85,12 @@ async function sync(backupSite, remotePath, dataLayout, progressCallback) {
// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
const concurrency = backupSite.limits?.syncConcurrency || (backupSite.provider === 's3' ? 20 : 10);
const cacheFile = path.join(paths.BACKUP_INFO_DIR, backupSite.id, `${dataLayout.getBasename()}.sync.cache`);
- const { delQueue, addQueue, integrityMap } = await syncer.sync(dataLayout, cacheFile);
+    const { delQueue, addQueue, integrityMap } = await syncer.sync(dataLayout, cacheFile); // integrityMap holds the unchanged files
debug(`sync: processing ${delQueue.length} deletes and ${addQueue.length} additions`);
const aggregatedStats = {
+ transferred: 0,
size: [...integrityMap.values()].reduce((sum, { size }) => sum + size, 0),
- fileCount: integrityMap.size + addQueue.length,
+ fileCount: addQueue.length + integrityMap.size, // final file count, not the transferred file count
startTime: Date.now(),
totalMsecs: 0
};
@@ -111,9 +113,11 @@ async function sync(backupSite, remotePath, dataLayout, progressCallback) {
debug(`Adding ${change.path} position ${change.position} try ${retryCount}`);
const uploader = await backupSites.storageApi(backupSite).upload(backupSite.config, fullPath);
- const { integrity } = await addFile(dataLayout.toLocalPath('./' + change.path), backupSite.encryption, uploader, progressCallback);
- integrityMap.set(destPath, integrity);
- aggregatedStats.size += integrity.size;
+ const result = await addFile(dataLayout.toLocalPath('./' + change.path), backupSite.encryption, uploader, progressCallback);
+ if (!result) return; // this can happen if the file disappeared on us
+ integrityMap.set(destPath, result.integrity);
+ aggregatedStats.transferred += result.stats.transferred;
+ aggregatedStats.size += result.stats.transferred;
});
}
}
diff --git a/src/backupformat/tgz.js b/src/backupformat/tgz.js
index 252ca881c..6ac5becde 100644
--- a/src/backupformat/tgz.js
+++ b/src/backupformat/tgz.js
@@ -169,12 +169,14 @@ async function tarPack(dataLayout, encryption, uploader, progressCallback) {
const [error] = await pipeline; // already wrapped in safe()
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `tarPack pipeline error: ${error.message}`);
- debug(`tarPack: pipeline finished: ${JSON.stringify(ps.stats())}`);
+
+ const stats = ps.stats();
+ debug(`tarPack: pipeline finished: ${JSON.stringify(stats)}`);
await uploader.finish();
return {
- stats: { fileCount, ...ps.stats() },
- integrity: { size: ps.stats().transferred, fileCount, sha256: hash.digest('hex') }
+ stats: { fileCount, size: stats.transferred, ...stats },
+ integrity: { size: stats.transferred, fileCount, sha256: hash.digest('hex') }
};
}
diff --git a/src/backuptask.js b/src/backuptask.js
index c0005d13b..e07102724 100644
--- a/src/backuptask.js
+++ b/src/backuptask.js
@@ -99,6 +99,10 @@ async function upload(remotePath, siteId, dataLayoutString, progressCallback) {
await checkPreconditions(backupSite, dataLayout);
+    // integrity - { signature } of the uploaded .backupinfo.
+    // .backupinfo contains an integrityMap of { size, fileCount, sha256 } per file. For tgz, fileCount is the file count inside the archive.
+    // stats - { fileCount, size, startTime, totalMsecs, transferred }. size is the backup size, transferred is what was transferred; they differ for rsync.
+    // fileCount and size in stats should match up with .backupinfo
const { stats, integrityMap } = await backupFormats.api(backupSite.format).upload(backupSite, remotePath, dataLayout, progressCallback);
progressCallback({ message: `Uploading integrity information to ${remotePath}.backupinfo` });