clean up pve/ patches by squashing patches of patches

No functional change intended.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Stefan Reiter 2021-03-03 10:56:02 +01:00 committed by Thomas Lamprecht
parent 25b9156448
commit 0c893fd820
34 changed files with 830 additions and 2153 deletions


@@ -3,6 +3,9 @@ From: Dietmar Maurer <dietmar@proxmox.com>
Date: Mon, 6 Apr 2020 12:16:59 +0200
Subject: [PATCH] PVE-Backup: proxmox backup patches for qemu
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
[PVE-Backup: avoid coroutines to fix AIO freeze, cleanups]
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
block/meson.build | 5 +
block/monitor/block-hmp-cmds.c | 33 ++
@@ -15,11 +18,11 @@ Subject: [PATCH] PVE-Backup: proxmox backup patches for qemu
monitor/hmp-cmds.c | 44 ++
proxmox-backup-client.c | 176 ++++++
proxmox-backup-client.h | 59 ++
pve-backup.c | 955 +++++++++++++++++++++++++++++++++
pve-backup.c | 957 +++++++++++++++++++++++++++++++++
qapi/block-core.json | 109 ++++
qapi/common.json | 13 +
qapi/machine.json | 15 +-
15 files changed, 1444 insertions(+), 14 deletions(-)
15 files changed, 1446 insertions(+), 14 deletions(-)
create mode 100644 proxmox-backup-client.c
create mode 100644 proxmox-backup-client.h
create mode 100644 pve-backup.c
@@ -507,10 +510,10 @@ index 0000000000..1dda8b7d8f
+#endif /* PROXMOX_BACKUP_CLIENT_H */
diff --git a/pve-backup.c b/pve-backup.c
new file mode 100644
index 0000000000..55441eb9d1
index 0000000000..d40f3f2fd6
--- /dev/null
+++ b/pve-backup.c
@@ -0,0 +1,955 @@
@@ -0,0 +1,957 @@
+#include "proxmox-backup-client.h"
+#include "vma.h"
+
@@ -524,11 +527,27 @@ index 0000000000..55441eb9d1
+
+/* PVE backup state and related function */
+
+/*
+ * Note: A resume from a qemu_coroutine_yield can happen in a different thread,
+ * so you may not use normal mutexes within coroutines:
+ *
+ * ---bad-example---
+ * qemu_rec_mutex_lock(lock)
+ * ...
+ * qemu_coroutine_yield() // wait for something
+ * // we are now inside a different thread
+ * qemu_rec_mutex_unlock(lock) // Crash - wrong thread!!
+ * ---end-bad-example--
+ *
+ * ==> Always use CoMutex inside coroutines.
+ * ==> Never acquire/release AioContext within coroutines (because they use QemuRecMutex)
+ *
+ */
+
+static struct PVEBackupState {
+ struct {
+ // Everything accessed from qmp command, protected using rwlock
+ CoRwlock rwlock;
+ // Everything accessed from qmp_backup_query command is protected using lock
+ QemuMutex lock;
+ Error *error;
+ time_t start_time;
+ time_t end_time;
@@ -538,19 +557,20 @@ index 0000000000..55441eb9d1
+ size_t total;
+ size_t transferred;
+ size_t zero_bytes;
+ bool cancel;
+ } stat;
+ int64_t speed;
+ VmaWriter *vmaw;
+ ProxmoxBackupHandle *pbs;
+ GList *di_list;
+ CoMutex backup_mutex;
+ QemuMutex backup_mutex;
+ CoMutex dump_callback_mutex;
+} backup_state;
+
+static void pvebackup_init(void)
+{
+ qemu_co_rwlock_init(&backup_state.stat.rwlock);
+ qemu_co_mutex_init(&backup_state.backup_mutex);
+ qemu_mutex_init(&backup_state.stat.lock);
+ qemu_mutex_init(&backup_state.backup_mutex);
+ qemu_co_mutex_init(&backup_state.dump_callback_mutex);
+}
+
+// initialize PVEBackupState at startup
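The locking comment above is the central invariant of the new pve-backup.c: state that coroutines touch is guarded by a CoMutex, everything else by a plain QemuMutex. A minimal sketch of the rule, assuming only the standard QEMU coroutine API (names invented for illustration, not part of the patch):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    static CoMutex sketch_lock;    /* initialize once with qemu_co_mutex_init() */

    static void coroutine_fn sketch_worker(void *opaque)
    {
        qemu_co_mutex_lock(&sketch_lock);    /* parks only this coroutine */
        qemu_coroutine_yield();              /* may resume on another thread */
        qemu_co_mutex_unlock(&sketch_lock);  /* still correct after the resume */
    }

A QemuRecMutex (as taken by aio_context_acquire()) remembers its owning thread, which is why the bad example crashes; a CoMutex tracks the owning coroutine instead.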
@@ -565,10 +585,54 @@ index 0000000000..55441eb9d1
+ BlockDriverState *target;
+} PVEBackupDevInfo;
+
+static void pvebackup_co_run_next_job(void);
+static void pvebackup_run_next_job(void);
+
+static BlockJob *
+lookup_active_block_job(PVEBackupDevInfo *di)
+{
+ if (!di->completed && di->bs) {
+ for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
+ if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
+ continue;
+ }
+
+ BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
+ if (bjob && bjob->source_bs == di->bs) {
+ return job;
+ }
+ }
+ }
+ return NULL;
+}
+
+static void pvebackup_propagate_error(Error *err)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ error_propagate(&backup_state.stat.error, err);
+ qemu_mutex_unlock(&backup_state.stat.lock);
+}
+
+static bool pvebackup_error_or_canceled(void)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ bool error_or_canceled = !!backup_state.stat.error;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ return error_or_canceled;
+}
+
+static void pvebackup_add_transfered_bytes(size_t transferred, size_t zero_bytes)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ backup_state.stat.zero_bytes += zero_bytes;
+ backup_state.stat.transferred += transferred;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+}
+
+// This may get called from multiple coroutines in multiple io-threads
+// Note1: this may get called after job_cancel()
+static int coroutine_fn
+pvebackup_co_dump_cb(
+pvebackup_co_dump_pbs_cb(
+ void *opaque,
+ uint64_t start,
+ uint64_t bytes,
@@ -580,137 +644,127 @@ index 0000000000..55441eb9d1
+ const unsigned char *buf = pbuf;
+ PVEBackupDevInfo *di = opaque;
+
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ bool cancel = backup_state.stat.cancel;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ assert(backup_state.pbs);
+
+ if (cancel) {
+ return size; // return success
+ Error *local_err = NULL;
+ int pbs_res = -1;
+
+ qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
+
+ // avoid deadlock if job is cancelled
+ if (pvebackup_error_or_canceled()) {
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return -1;
+ }
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+
+ int ret = -1;
+
+ if (backup_state.vmaw) {
+ size_t zero_bytes = 0;
+ uint64_t remaining = size;
+
+ uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
+ if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ if (!backup_state.stat.error) {
+ qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
+ error_setg(&backup_state.stat.error,
+ "got unaligned write inside backup dump "
+ "callback (sector %ld)", start);
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ return -1; // not aligned to cluster size
+ }
+
+ while (remaining > 0) {
+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
+ buf, &zero_bytes);
+ ++cluster_num;
+ if (buf) {
+ buf += VMA_CLUSTER_SIZE;
+ }
+ if (ret < 0) {
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ if (!backup_state.stat.error) {
+ qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
+ vma_writer_error_propagate(backup_state.vmaw, &backup_state.stat.error);
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ return ret;
+ } else {
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ backup_state.stat.zero_bytes += zero_bytes;
+ if (remaining >= VMA_CLUSTER_SIZE) {
+ backup_state.stat.transferred += VMA_CLUSTER_SIZE;
+ remaining -= VMA_CLUSTER_SIZE;
+ } else {
+ backup_state.stat.transferred += remaining;
+ remaining = 0;
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ }
+ }
+ } else if (backup_state.pbs) {
+ Error *local_err = NULL;
+ int pbs_res = -1;
+
+ pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
+
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+
+ if (pbs_res < 0) {
+ error_propagate(&backup_state.stat.error, local_err);
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ return pbs_res;
+ } else {
+ if (!buf) {
+ backup_state.stat.zero_bytes += size;
+ }
+ backup_state.stat.transferred += size;
+ }
+
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+
+ if (pbs_res < 0) {
+ pvebackup_propagate_error(local_err);
+ return pbs_res;
+ } else {
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ if (!buf) {
+ backup_state.stat.zero_bytes += size;
+ }
+ backup_state.stat.transferred += size;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ pvebackup_add_transfered_bytes(size, !buf ? size : 0);
+ }
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ return size;
+}
+
+static void coroutine_fn pvebackup_co_cleanup(void)
+// This may get called from multiple coroutines in multiple io-threads
+static int coroutine_fn
+pvebackup_co_dump_vma_cb(
+ void *opaque,
+ uint64_t start,
+ uint64_t bytes,
+ const void *pbuf)
+{
+ assert(qemu_in_coroutine());
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+ const uint64_t size = bytes;
+ const unsigned char *buf = pbuf;
+ PVEBackupDevInfo *di = opaque;
+
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ int ret = -1;
+
+ assert(backup_state.vmaw);
+
+ uint64_t remaining = size;
+
+ uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
+ if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
+ Error *local_err = NULL;
+ error_setg(&local_err,
+ "got unaligned write inside backup dump "
+ "callback (sector %ld)", start);
+ pvebackup_propagate_error(local_err);
+ return -1; // not aligned to cluster size
+ }
+
+ while (remaining > 0) {
+ qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
+ // avoid deadlock if job is cancelled
+ if (pvebackup_error_or_canceled()) {
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return -1;
+ }
+
+ size_t zero_bytes = 0;
+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num, buf, &zero_bytes);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+
+ ++cluster_num;
+ if (buf) {
+ buf += VMA_CLUSTER_SIZE;
+ }
+ if (ret < 0) {
+ Error *local_err = NULL;
+ vma_writer_error_propagate(backup_state.vmaw, &local_err);
+ pvebackup_propagate_error(local_err);
+ return ret;
+ } else {
+ if (remaining >= VMA_CLUSTER_SIZE) {
+ assert(ret == VMA_CLUSTER_SIZE);
+ pvebackup_add_transfered_bytes(VMA_CLUSTER_SIZE, zero_bytes);
+ remaining -= VMA_CLUSTER_SIZE;
+ } else {
+ assert(ret == remaining);
+ pvebackup_add_transfered_bytes(remaining, zero_bytes);
+ remaining = 0;
+ }
+ }
+ }
+
+ return size;
+}
+
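The VMA callback above only accepts writes starting on a cluster boundary and then streams the buffer cluster by cluster, taking dump_callback_mutex around each vma_writer_write() call. A worked example of the alignment check, assuming a 64 KiB VMA_CLUSTER_SIZE (illustrative values only):

    uint64_t start = 3 * VMA_CLUSTER_SIZE;            /* 196608 for 64 KiB clusters */
    uint64_t cluster_num = start / VMA_CLUSTER_SIZE;  /* 3 */
    assert(cluster_num * VMA_CLUSTER_SIZE == start);  /* aligned, accepted */
    /* start = 3 * VMA_CLUSTER_SIZE + 512 would fail the check and the
     * callback returns -1 after propagating an "unaligned write" error. */

vma_writer_write() also reports how many of the written bytes were zeroes, which feeds the zero_bytes statistic through pvebackup_add_transfered_bytes().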
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_co_cleanup(void *unused)
+{
+ assert(qemu_in_coroutine());
+
+ qemu_mutex_lock(&backup_state.stat.lock);
+ backup_state.stat.end_time = time(NULL);
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ if (backup_state.vmaw) {
+ Error *local_err = NULL;
+ vma_writer_close(backup_state.vmaw, &local_err);
+
+ if (local_err != NULL) {
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ error_propagate(&backup_state.stat.error, local_err);
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ }
+ pvebackup_propagate_error(local_err);
+ }
+
+ backup_state.vmaw = NULL;
+ }
+
+ if (backup_state.pbs) {
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
+ if (!error_or_canceled) {
+ if (!pvebackup_error_or_canceled()) {
+ Error *local_err = NULL;
+ proxmox_backup_co_finish(backup_state.pbs, &local_err);
+ if (local_err != NULL) {
+ qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
+ error_propagate(&backup_state.stat.error, local_err);
+ }
+ pvebackup_propagate_error(local_err);
+ }
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+
+ proxmox_backup_disconnect(backup_state.pbs);
+ backup_state.pbs = NULL;
@@ -718,43 +772,14 @@ index 0000000000..55441eb9d1
+
+ g_list_free(backup_state.di_list);
+ backup_state.di_list = NULL;
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+}
+
+typedef struct PVEBackupCompeteCallbackData {
+ PVEBackupDevInfo *di;
+ int result;
+} PVEBackupCompeteCallbackData;
+
+static void coroutine_fn pvebackup_co_complete_cb(void *opaque)
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_complete_stream(void *opaque)
+{
+ assert(qemu_in_coroutine());
+ PVEBackupDevInfo *di = opaque;
+
+ PVEBackupCompeteCallbackData *cb_data = opaque;
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+
+ PVEBackupDevInfo *di = cb_data->di;
+ int ret = cb_data->result;
+
+ di->completed = true;
+
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
+
+ if (ret < 0 && !backup_state.stat.error) {
+ qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
+ error_setg(&backup_state.stat.error, "job failed with err %d - %s",
+ ret, strerror(-ret));
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+
+ di->bs = NULL;
+
+ if (di->target) {
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
+ bool error_or_canceled = pvebackup_error_or_canceled();
+
+ if (backup_state.vmaw) {
+ vma_writer_close_stream(backup_state.vmaw, di->dev_id);
@@ -764,110 +789,101 @@ index 0000000000..55441eb9d1
+ Error *local_err = NULL;
+ proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
+ if (local_err != NULL) {
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ error_propagate(&backup_state.stat.error, local_err);
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ pvebackup_propagate_error(local_err);
+ }
+ }
+
+ // remove self from job queue
+ backup_state.di_list = g_list_remove(backup_state.di_list, di);
+ g_free(di);
+
+ int pending_jobs = g_list_length(backup_state.di_list);
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ if (pending_jobs > 0) {
+ pvebackup_co_run_next_job();
+ } else {
+ pvebackup_co_cleanup();
+ }
+}
+
+static void pvebackup_complete_cb(void *opaque, int ret)
+{
+ // This can be called from the main loop, or from a coroutine
+ PVEBackupCompeteCallbackData cb_data = {
+ .di = opaque,
+ .result = ret,
+ };
+ assert(!qemu_in_coroutine());
+
+ if (qemu_in_coroutine()) {
+ pvebackup_co_complete_cb(&cb_data);
+ } else {
+ block_on_coroutine_fn(pvebackup_co_complete_cb, &cb_data);
+ PVEBackupDevInfo *di = opaque;
+
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ di->completed = true;
+
+ if (ret < 0) {
+ Error *local_err = NULL;
+ error_setg(&local_err, "job failed with err %d - %s", ret, strerror(-ret));
+ pvebackup_propagate_error(local_err);
+ }
+
+ di->bs = NULL;
+
+ assert(di->target == NULL);
+
+ block_on_coroutine_fn(pvebackup_complete_stream, di);
+
+ // remove self from job queue
+ backup_state.di_list = g_list_remove(backup_state.di_list, di);
+
+ g_free(di);
+
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+
+ pvebackup_run_next_job();
+}
+
+static void coroutine_fn pvebackup_co_cancel(void *opaque)
+static void pvebackup_cancel(void)
+{
+ assert(qemu_in_coroutine());
+ assert(!qemu_in_coroutine());
+
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ backup_state.stat.cancel = true;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ Error *cancel_err = NULL;
+ error_setg(&cancel_err, "backup canceled");
+ pvebackup_propagate_error(cancel_err);
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+
+ // Avoid race between block jobs and backup-cancel command:
+ if (!(backup_state.vmaw || backup_state.pbs)) {
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ return;
+ }
+
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ if (!backup_state.stat.error) {
+ qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
+ error_setg(&backup_state.stat.error, "backup cancelled");
+ }
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ if (backup_state.vmaw) {
+ /* make sure vma writer does not block anymore */
+ vma_writer_set_error(backup_state.vmaw, "backup cancelled");
+ vma_writer_set_error(backup_state.vmaw, "backup canceled");
+ }
+
+ if (backup_state.pbs) {
+ proxmox_backup_abort(backup_state.pbs, "backup cancelled");
+ proxmox_backup_abort(backup_state.pbs, "backup canceled");
+ }
+
+ bool running_jobs = 0;
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+ if (!di->completed && di->bs) {
+ for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
+ if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
+ continue;
+ }
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+
+ BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
+ if (bjob && bjob->source_bs == di->bs) {
+ AioContext *aio_context = job->job.aio_context;
+ aio_context_acquire(aio_context);
+ for(;;) {
+
+ if (!di->completed) {
+ running_jobs += 1;
+ job_cancel(&job->job, false);
+ }
+ aio_context_release(aio_context);
+ }
+ BlockJob *next_job = NULL;
+
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ BlockJob *job = lookup_active_block_job(di);
+ if (job != NULL) {
+ next_job = job;
+ break;
+ }
+ }
+
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+
+ if (next_job) {
+ AioContext *aio_context = next_job->job.aio_context;
+ aio_context_acquire(aio_context);
+ job_cancel_sync(&next_job->job);
+ aio_context_release(aio_context);
+ } else {
+ break;
+ }
+ }
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ if (running_jobs == 0) pvebackup_co_cleanup(); // else job will call completion handler
+}
+
+void qmp_backup_cancel(Error **errp)
+{
+ block_on_coroutine_fn(pvebackup_co_cancel, NULL);
+ pvebackup_cancel();
+}
+
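The loop in pvebackup_cancel() above deliberately drops backup_mutex before each job_cancel_sync(): draining a job fires pvebackup_complete_cb(), which itself takes backup_mutex, so holding the lock across the cancel would deadlock. A condensed sketch of the pattern (simplified; find_next_active_job() is a stand-in for the di_list walk, and the AioContext handling is omitted):

    for (;;) {
        qemu_mutex_lock(&backup_state.backup_mutex);
        BlockJob *job = find_next_active_job();   /* hypothetical helper */
        qemu_mutex_unlock(&backup_state.backup_mutex);

        if (!job) {
            break;                    /* nothing left to cancel */
        }
        job_cancel_sync(&job->job);   /* may run pvebackup_complete_cb() */
    }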
+// assumes the caller holds backup_mutex
+static int coroutine_fn pvebackup_co_add_config(
+ const char *file,
+ const char *name,
@@ -919,46 +935,97 @@ index 0000000000..55441eb9d1
+
+bool job_should_pause(Job *job);
+
+static void coroutine_fn pvebackup_co_run_next_job(void)
+static void pvebackup_run_next_job(void)
+{
+ assert(qemu_in_coroutine());
+ assert(!qemu_in_coroutine());
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+ if (!di->completed && di->bs) {
+ for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
+ if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
+ continue;
+
+ BlockJob *job = lookup_active_block_job(di);
+
+ if (job) {
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+
+ AioContext *aio_context = job->job.aio_context;
+ aio_context_acquire(aio_context);
+
+ if (job_should_pause(&job->job)) {
+ bool error_or_canceled = pvebackup_error_or_canceled();
+ if (error_or_canceled) {
+ job_cancel_sync(&job->job);
+ } else {
+ job_resume(&job->job);
+ }
+ }
+ aio_context_release(aio_context);
+ return;
+ }
+ }
+
+ BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
+ if (bjob && bjob->source_bs == di->bs) {
+ AioContext *aio_context = job->job.aio_context;
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ aio_context_acquire(aio_context);
+ block_on_coroutine_fn(pvebackup_co_cleanup, NULL); // no more jobs, run cleanup
+
+ if (job_should_pause(&job->job)) {
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+}
+
+ if (error_or_canceled) {
+ job_cancel(&job->job, false);
+ } else {
+ job_resume(&job->job);
+ }
+ }
+ aio_context_release(aio_context);
+ return;
+ }
+static bool create_backup_jobs(void) {
+
+ assert(!qemu_in_coroutine());
+
+ Error *local_err = NULL;
+
+ /* create and start all jobs (paused state) */
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ assert(di->target != NULL);
+
+ AioContext *aio_context = bdrv_get_aio_context(di->bs);
+ aio_context_acquire(aio_context);
+
+ BlockJob *job = backup_job_create(
+ NULL, di->bs, di->target, backup_state.speed, MIRROR_SYNC_MODE_FULL, NULL,
+ BITMAP_SYNC_MODE_NEVER, false, NULL, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
+ JOB_DEFAULT, pvebackup_complete_cb, di, 1, NULL, &local_err);
+
+ aio_context_release(aio_context);
+
+ if (!job || local_err != NULL) {
+ Error *create_job_err = NULL;
+ error_setg(&create_job_err, "backup_job_create failed: %s",
+ local_err ? error_get_pretty(local_err) : "null");
+
+ pvebackup_propagate_error(create_job_err);
+ break;
+ }
+ job_start(&job->job);
+
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
+
+ bool errors = pvebackup_error_or_canceled();
+
+ if (errors) {
+ l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ if (di->target) {
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
+ }
+ }
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ return errors;
+}
+
+typedef struct QmpBackupTask {
@@ -989,7 +1056,8 @@ index 0000000000..55441eb9d1
+ UuidInfo *result;
+} QmpBackupTask;
+
+static void coroutine_fn pvebackup_co_start(void *opaque)
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_co_prepare(void *opaque)
+{
+ assert(qemu_in_coroutine());
+
@@ -1008,16 +1076,12 @@ index 0000000000..55441eb9d1
+ GList *di_list = NULL;
+ GList *l;
+ UuidInfo *uuid_info;
+ BlockJob *job;
+
+ const char *config_name = "qemu-server.conf";
+ const char *firewall_name = "qemu-server.fw";
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+
+ if (backup_state.di_list) {
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
+ error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
+ "previous backup not finished");
+ return;
+ }
@@ -1140,7 +1204,7 @@ index 0000000000..55441eb9d1
+ if (dev_id < 0)
+ goto err;
+
+ if (!(di->target = bdrv_backup_dump_create(dump_cb_block_size, di->size, pvebackup_co_dump_cb, di, task->errp))) {
+ if (!(di->target = bdrv_backup_dump_create(dump_cb_block_size, di->size, pvebackup_co_dump_pbs_cb, di, task->errp))) {
+ goto err;
+ }
+
@@ -1161,7 +1225,7 @@ index 0000000000..55441eb9d1
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ if (!(di->target = bdrv_backup_dump_create(VMA_CLUSTER_SIZE, di->size, pvebackup_co_dump_cb, di, task->errp))) {
+ if (!(di->target = bdrv_backup_dump_create(VMA_CLUSTER_SIZE, di->size, pvebackup_co_dump_vma_cb, di, task->errp))) {
+ goto err;
+ }
+
@@ -1226,9 +1290,7 @@ index 0000000000..55441eb9d1
+ }
+ /* initialize global backup_state now */
+
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+
+ backup_state.stat.cancel = false;
+ qemu_mutex_lock(&backup_state.stat.lock);
+
+ if (backup_state.stat.error) {
+ error_free(backup_state.stat.error);
@@ -1251,7 +1313,7 @@ index 0000000000..55441eb9d1
+ backup_state.stat.transferred = 0;
+ backup_state.stat.zero_bytes = 0;
+
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ backup_state.speed = (task->has_speed && task->speed > 0) ? task->speed : 0;
+
@@ -1260,48 +1322,6 @@ index 0000000000..55441eb9d1
+
+ backup_state.di_list = di_list;
+
+ /* start all jobs (paused state) */
+ l = di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ // make sure target runs in same aio_context as source
+ AioContext *aio_context = bdrv_get_aio_context(di->bs);
+ aio_context_acquire(aio_context);
+ GSList *ignore = NULL;
+ bdrv_set_aio_context_ignore(di->target, aio_context, &ignore);
+ g_slist_free(ignore);
+ aio_context_release(aio_context);
+
+ job = backup_job_create(NULL, di->bs, di->target, backup_state.speed, MIRROR_SYNC_MODE_FULL, NULL,
+ BITMAP_SYNC_MODE_NEVER, false, NULL, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
+ JOB_DEFAULT, pvebackup_complete_cb, di, 1, NULL, &local_err);
+ if (!job || local_err != NULL) {
+ qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ error_setg(&backup_state.stat.error, "backup_job_create failed");
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ break;
+ }
+ job_start(&job->job);
+ if (di->target) {
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
+ }
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ bool no_errors = !backup_state.stat.error;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+
+ if (no_errors) {
+ pvebackup_co_run_next_job(); // run one job
+ } else {
+ pvebackup_co_cancel(NULL);
+ }
+
+ uuid_info = g_malloc0(sizeof(*uuid_info));
+ uuid_info->UUID = uuid_str;
+
@@ -1344,8 +1364,6 @@ index 0000000000..55441eb9d1
+ rmdir(backup_dir);
+ }
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ task->result = NULL;
+ return;
+}
@@ -1389,32 +1407,31 @@ index 0000000000..55441eb9d1
+ .errp = errp,
+ };
+
+ block_on_coroutine_fn(pvebackup_co_start, &task);
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ block_on_coroutine_fn(pvebackup_co_prepare, &task);
+
+ if (*errp == NULL) {
+ create_backup_jobs();
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+ pvebackup_run_next_job();
+ } else {
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+ }
+
+ return task.result;
+}
+
+
+typedef struct QmpQueryBackupTask {
+ Error **errp;
+ BackupStatus *result;
+} QmpQueryBackupTask;
+
+static void coroutine_fn pvebackup_co_query(void *opaque)
+BackupStatus *qmp_query_backup(Error **errp)
+{
+ assert(qemu_in_coroutine());
+
+ QmpQueryBackupTask *task = opaque;
+
+ BackupStatus *info = g_malloc0(sizeof(*info));
+
+ qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ qemu_mutex_lock(&backup_state.stat.lock);
+
+ if (!backup_state.stat.start_time) {
+ /* not started, return {} */
+ task->result = info;
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ return;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+ return info;
+ }
+
+ info->has_status = true;
@ -1450,21 +1467,9 @@ index 0000000000..55441eb9d1
+ info->has_transferred = true;
+ info->transferred = backup_state.stat.transferred;
+
+ task->result = info;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+}
+
+BackupStatus *qmp_query_backup(Error **errp)
+{
+ QmpQueryBackupTask task = {
+ .errp = errp,
+ .result = NULL,
+ };
+
+ block_on_coroutine_fn(pvebackup_co_query, &task);
+
+ return task.result;
+ return info;
+}
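Taken together, qmp_backup() now runs in clearly separated stages: preparation in a coroutine under backup_mutex, job creation in the main loop, then one job resumed at a time. A condensed restatement of the flow above (error paths trimmed):

    qemu_mutex_lock(&backup_state.backup_mutex);
    block_on_coroutine_fn(pvebackup_co_prepare, &task);  /* coroutine stage */
    if (*errp == NULL) {
        create_backup_jobs();                  /* jobs are created paused */
        qemu_mutex_unlock(&backup_state.backup_mutex);
        pvebackup_run_next_job();              /* resumes the first job */
    } else {
        qemu_mutex_unlock(&backup_state.backup_mutex);
    }

Each finished job re-enters pvebackup_run_next_job() via pvebackup_complete_cb(), so the devices are backed up sequentially until the list is empty and pvebackup_co_cleanup() runs.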
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 7957b9867d..be67dc3376 100644


@@ -7,8 +7,8 @@ Subject: [PATCH] PVE-Backup: pbs-restore - new command to restore from proxmox
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
meson.build | 4 +
pbs-restore.c | 218 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 222 insertions(+)
pbs-restore.c | 224 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 228 insertions(+)
create mode 100644 pbs-restore.c
diff --git a/meson.build b/meson.build
@@ -28,10 +28,10 @@ index 3094f98c47..6f1fafee14 100644
subdir('contrib/elf2dmp')
diff --git a/pbs-restore.c b/pbs-restore.c
new file mode 100644
index 0000000000..d4daee7e91
index 0000000000..4d3f925a1b
--- /dev/null
+++ b/pbs-restore.c
@@ -0,0 +1,218 @@
@@ -0,0 +1,224 @@
+/*
+ * Qemu image restore helper for Proxmox Backup
+ *
@@ -195,13 +195,19 @@ index 0000000000..d4daee7e91
+ fprintf(stderr, "connecting to repository '%s'\n", repository);
+ }
+ char *pbs_error = NULL;
+ ProxmoxRestoreHandle *conn = proxmox_restore_connect(
+ ProxmoxRestoreHandle *conn = proxmox_restore_new(
+ repository, snapshot, password, keyfile, key_password, fingerprint, &pbs_error);
+ if (conn == NULL) {
+ fprintf(stderr, "restore failed: %s\n", pbs_error);
+ return -1;
+ }
+
+ int res = proxmox_restore_connect(conn, &pbs_error);
+ if (res < 0 || pbs_error) {
+ fprintf(stderr, "restore failed (connection error): %s\n", pbs_error);
+ return -1;
+ }
+
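The hunk above splits connection setup in two: proxmox_restore_new() creates and configures the handle, proxmox_restore_connect() then establishes the connection, so setup and connection failures can be reported separately. A minimal usage sketch following the code above:

    char *pbs_error = NULL;
    ProxmoxRestoreHandle *conn = proxmox_restore_new(
        repository, snapshot, password, keyfile, key_password, fingerprint, &pbs_error);
    if (conn == NULL) {
        /* handle creation failed, e.g. malformed repository spec */
    } else if (proxmox_restore_connect(conn, &pbs_error) < 0) {
        /* connecting to the server failed */
    }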
+ QDict *options = qdict_new();
+
+ if (format) {
@@ -232,7 +238,7 @@ index 0000000000..d4daee7e91
+ fprintf(stderr, "starting to restore snapshot '%s'\n", snapshot);
+ fflush(stderr); // ensure we do not get printed after the progress log
+ }
+ int res = proxmox_restore_image(
+ res = proxmox_restore_image(
+ conn,
+ archive_name,
+ write_callback,


@@ -1,914 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Mon, 6 Apr 2020 12:17:00 +0200
Subject: [PATCH] PVE-Backup: avoid coroutines to fix AIO freeze, cleanups
We observed various AIO pool loop freezes, so we decided to avoid
coroutines and restrict ourselves to code similar to upstream
(see blockdev.c: do_backup_common).
* avoid coroutine for job related code (causes hangs with iothreads)
- We now acquire/release all mutexes outside coroutines, so we can
correctly use a normal mutex.
* split pvebackup_co_dump_cb into:
- pvebackup_co_dump_pbs_cb and
- pvebackup_co_dump_vma_cb
* new helper functions
- pvebackup_propagate_error
- pvebackup_error_or_canceled
- pvebackup_add_transfered_bytes
* avoid cancel flag (not needed)
* simplify backup_cancel logic
There is progress on upstream to support running qmp commands inside
coroutines, see:
https://lists.gnu.org/archive/html/qemu-devel/2020-02/msg04852.html
We should consider using that when it is available in upstream qemu.
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
pve-backup.c | 638 ++++++++++++++++++++++++++-------------------------
1 file changed, 320 insertions(+), 318 deletions(-)
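The rules in the message above have a simple shape on the main-loop side, where all job-related code now runs: no coroutine can yield there, so a plain QemuMutex is safe. An illustrative sketch (not taken from the patch):

    static void example_complete_cb(void *opaque, int ret)   /* hypothetical */
    {
        assert(!qemu_in_coroutine());   /* job callbacks stay in the main loop */

        qemu_mutex_lock(&backup_state.backup_mutex);
        /* ... update device list and statistics ... */
        qemu_mutex_unlock(&backup_state.backup_mutex);
    }

The coroutine-only pieces (the dump callbacks) keep their own CoMutex, dump_callback_mutex, which is safe to hold across a yield.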
diff --git a/pve-backup.c b/pve-backup.c
index 55441eb9d1..d40f3f2fd6 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -11,11 +11,27 @@
/* PVE backup state and related function */
+/*
+ * Note: A resume from a qemu_coroutine_yield can happen in a different thread,
+ * so you may not use normal mutexes within coroutines:
+ *
+ * ---bad-example---
+ * qemu_rec_mutex_lock(lock)
+ * ...
+ * qemu_coroutine_yield() // wait for something
+ * // we are now inside a different thread
+ * qemu_rec_mutex_unlock(lock) // Crash - wrong thread!!
+ * ---end-bad-example--
+ *
+ * ==> Always use CoMutex inside coroutines.
+ * ==> Never acquire/release AioContext within coroutines (because they use QemuRecMutex)
+ *
+ */
static struct PVEBackupState {
struct {
- // Everything accessed from qmp command, protected using rwlock
- CoRwlock rwlock;
+ // Everything accessed from qmp_backup_query command is protected using lock
+ QemuMutex lock;
Error *error;
time_t start_time;
time_t end_time;
@@ -25,19 +41,20 @@ static struct PVEBackupState {
size_t total;
size_t transferred;
size_t zero_bytes;
- bool cancel;
} stat;
int64_t speed;
VmaWriter *vmaw;
ProxmoxBackupHandle *pbs;
GList *di_list;
- CoMutex backup_mutex;
+ QemuMutex backup_mutex;
+ CoMutex dump_callback_mutex;
} backup_state;
static void pvebackup_init(void)
{
- qemu_co_rwlock_init(&backup_state.stat.rwlock);
- qemu_co_mutex_init(&backup_state.backup_mutex);
+ qemu_mutex_init(&backup_state.stat.lock);
+ qemu_mutex_init(&backup_state.backup_mutex);
+ qemu_co_mutex_init(&backup_state.dump_callback_mutex);
}
// initialize PVEBackupState at startup
@@ -52,10 +69,54 @@ typedef struct PVEBackupDevInfo {
BlockDriverState *target;
} PVEBackupDevInfo;
-static void pvebackup_co_run_next_job(void);
+static void pvebackup_run_next_job(void);
+static BlockJob *
+lookup_active_block_job(PVEBackupDevInfo *di)
+{
+ if (!di->completed && di->bs) {
+ for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
+ if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
+ continue;
+ }
+
+ BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
+ if (bjob && bjob->source_bs == di->bs) {
+ return job;
+ }
+ }
+ }
+ return NULL;
+}
+
+static void pvebackup_propagate_error(Error *err)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ error_propagate(&backup_state.stat.error, err);
+ qemu_mutex_unlock(&backup_state.stat.lock);
+}
+
+static bool pvebackup_error_or_canceled(void)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ bool error_or_canceled = !!backup_state.stat.error;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ return error_or_canceled;
+}
+
+static void pvebackup_add_transfered_bytes(size_t transferred, size_t zero_bytes)
+{
+ qemu_mutex_lock(&backup_state.stat.lock);
+ backup_state.stat.zero_bytes += zero_bytes;
+ backup_state.stat.transferred += transferred;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+}
+
+// This may get called from multiple coroutines in multiple io-threads
+// Note1: this may get called after job_cancel()
static int coroutine_fn
-pvebackup_co_dump_cb(
+pvebackup_co_dump_pbs_cb(
void *opaque,
uint64_t start,
uint64_t bytes,
@@ -67,137 +128,127 @@ pvebackup_co_dump_cb(
const unsigned char *buf = pbuf;
PVEBackupDevInfo *di = opaque;
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- bool cancel = backup_state.stat.cancel;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ assert(backup_state.pbs);
+
+ Error *local_err = NULL;
+ int pbs_res = -1;
+
+ qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
- if (cancel) {
- return size; // return success
+ // avoid deadlock if job is cancelled
+ if (pvebackup_error_or_canceled()) {
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return -1;
}
- qemu_co_mutex_lock(&backup_state.backup_mutex);
+ pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
- int ret = -1;
+ if (pbs_res < 0) {
+ pvebackup_propagate_error(local_err);
+ return pbs_res;
+ } else {
+ pvebackup_add_transfered_bytes(size, !buf ? size : 0);
+ }
- if (backup_state.vmaw) {
- size_t zero_bytes = 0;
- uint64_t remaining = size;
-
- uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
- if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- if (!backup_state.stat.error) {
- qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
- error_setg(&backup_state.stat.error,
- "got unaligned write inside backup dump "
- "callback (sector %ld)", start);
- }
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- return -1; // not aligned to cluster size
- }
+ return size;
+}
- while (remaining > 0) {
- ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
- buf, &zero_bytes);
- ++cluster_num;
- if (buf) {
- buf += VMA_CLUSTER_SIZE;
- }
- if (ret < 0) {
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- if (!backup_state.stat.error) {
- qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
- vma_writer_error_propagate(backup_state.vmaw, &backup_state.stat.error);
- }
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+// This may get called from multiple coroutines in multiple io-threads
+static int coroutine_fn
+pvebackup_co_dump_vma_cb(
+ void *opaque,
+ uint64_t start,
+ uint64_t bytes,
+ const void *pbuf)
+{
+ assert(qemu_in_coroutine());
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- return ret;
- } else {
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- backup_state.stat.zero_bytes += zero_bytes;
- if (remaining >= VMA_CLUSTER_SIZE) {
- backup_state.stat.transferred += VMA_CLUSTER_SIZE;
- remaining -= VMA_CLUSTER_SIZE;
- } else {
- backup_state.stat.transferred += remaining;
- remaining = 0;
- }
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- }
- }
- } else if (backup_state.pbs) {
- Error *local_err = NULL;
- int pbs_res = -1;
+ const uint64_t size = bytes;
+ const unsigned char *buf = pbuf;
+ PVEBackupDevInfo *di = opaque;
- pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
+ int ret = -1;
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ assert(backup_state.vmaw);
- if (pbs_res < 0) {
- error_propagate(&backup_state.stat.error, local_err);
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- return pbs_res;
- } else {
- if (!buf) {
- backup_state.stat.zero_bytes += size;
- }
- backup_state.stat.transferred += size;
+ uint64_t remaining = size;
+
+ uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
+ if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
+ Error *local_err = NULL;
+ error_setg(&local_err,
+ "got unaligned write inside backup dump "
+ "callback (sector %ld)", start);
+ pvebackup_propagate_error(local_err);
+ return -1; // not aligned to cluster size
+ }
+
+ while (remaining > 0) {
+ qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
+ // avoid deadlock if job is cancelled
+ if (pvebackup_error_or_canceled()) {
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return -1;
}
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ size_t zero_bytes = 0;
+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num, buf, &zero_bytes);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
- } else {
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- if (!buf) {
- backup_state.stat.zero_bytes += size;
+ ++cluster_num;
+ if (buf) {
+ buf += VMA_CLUSTER_SIZE;
+ }
+ if (ret < 0) {
+ Error *local_err = NULL;
+ vma_writer_error_propagate(backup_state.vmaw, &local_err);
+ pvebackup_propagate_error(local_err);
+ return ret;
+ } else {
+ if (remaining >= VMA_CLUSTER_SIZE) {
+ assert(ret == VMA_CLUSTER_SIZE);
+ pvebackup_add_transfered_bytes(VMA_CLUSTER_SIZE, zero_bytes);
+ remaining -= VMA_CLUSTER_SIZE;
+ } else {
+ assert(ret == remaining);
+ pvebackup_add_transfered_bytes(remaining, zero_bytes);
+ remaining = 0;
+ }
}
- backup_state.stat.transferred += size;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
}
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
-
return size;
}
-static void coroutine_fn pvebackup_co_cleanup(void)
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_co_cleanup(void *unused)
{
assert(qemu_in_coroutine());
- qemu_co_mutex_lock(&backup_state.backup_mutex);
-
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
+ qemu_mutex_lock(&backup_state.stat.lock);
backup_state.stat.end_time = time(NULL);
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_unlock(&backup_state.stat.lock);
if (backup_state.vmaw) {
Error *local_err = NULL;
vma_writer_close(backup_state.vmaw, &local_err);
if (local_err != NULL) {
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- error_propagate(&backup_state.stat.error, local_err);
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- }
+ pvebackup_propagate_error(local_err);
+ }
backup_state.vmaw = NULL;
}
if (backup_state.pbs) {
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
- if (!error_or_canceled) {
+ if (!pvebackup_error_or_canceled()) {
Error *local_err = NULL;
proxmox_backup_co_finish(backup_state.pbs, &local_err);
if (local_err != NULL) {
- qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
- error_propagate(&backup_state.stat.error, local_err);
- }
+ pvebackup_propagate_error(local_err);
+ }
}
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
proxmox_backup_disconnect(backup_state.pbs);
backup_state.pbs = NULL;
@@ -205,43 +256,14 @@ static void coroutine_fn pvebackup_co_cleanup(void)
g_list_free(backup_state.di_list);
backup_state.di_list = NULL;
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
}
-typedef struct PVEBackupCompeteCallbackData {
- PVEBackupDevInfo *di;
- int result;
-} PVEBackupCompeteCallbackData;
-
-static void coroutine_fn pvebackup_co_complete_cb(void *opaque)
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_complete_stream(void *opaque)
{
- assert(qemu_in_coroutine());
-
- PVEBackupCompeteCallbackData *cb_data = opaque;
-
- qemu_co_mutex_lock(&backup_state.backup_mutex);
-
- PVEBackupDevInfo *di = cb_data->di;
- int ret = cb_data->result;
-
- di->completed = true;
-
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
-
- if (ret < 0 && !backup_state.stat.error) {
- qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
- error_setg(&backup_state.stat.error, "job failed with err %d - %s",
- ret, strerror(-ret));
- }
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
-
- di->bs = NULL;
+ PVEBackupDevInfo *di = opaque;
- if (di->target) {
- bdrv_unref(di->target);
- di->target = NULL;
- }
+ bool error_or_canceled = pvebackup_error_or_canceled();
if (backup_state.vmaw) {
vma_writer_close_stream(backup_state.vmaw, di->dev_id);
@@ -251,110 +273,101 @@ static void coroutine_fn pvebackup_co_complete_cb(void *opaque)
Error *local_err = NULL;
proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
if (local_err != NULL) {
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- error_propagate(&backup_state.stat.error, local_err);
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ pvebackup_propagate_error(local_err);
}
}
+}
- // remove self from job queue
- backup_state.di_list = g_list_remove(backup_state.di_list, di);
- g_free(di);
+static void pvebackup_complete_cb(void *opaque, int ret)
+{
+ assert(!qemu_in_coroutine());
- int pending_jobs = g_list_length(backup_state.di_list);
+ PVEBackupDevInfo *di = opaque;
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ qemu_mutex_lock(&backup_state.backup_mutex);
- if (pending_jobs > 0) {
- pvebackup_co_run_next_job();
- } else {
- pvebackup_co_cleanup();
+ di->completed = true;
+
+ if (ret < 0) {
+ Error *local_err = NULL;
+ error_setg(&local_err, "job failed with err %d - %s", ret, strerror(-ret));
+ pvebackup_propagate_error(local_err);
}
-}
-static void pvebackup_complete_cb(void *opaque, int ret)
-{
- // This can be called from the main loop, or from a coroutine
- PVEBackupCompeteCallbackData cb_data = {
- .di = opaque,
- .result = ret,
- };
+ di->bs = NULL;
- if (qemu_in_coroutine()) {
- pvebackup_co_complete_cb(&cb_data);
- } else {
- block_on_coroutine_fn(pvebackup_co_complete_cb, &cb_data);
- }
-}
+ assert(di->target == NULL);
-static void coroutine_fn pvebackup_co_cancel(void *opaque)
-{
- assert(qemu_in_coroutine());
+ block_on_coroutine_fn(pvebackup_complete_stream, di);
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- backup_state.stat.cancel = true;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ // remove self from job queue
+ backup_state.di_list = g_list_remove(backup_state.di_list, di);
- qemu_co_mutex_lock(&backup_state.backup_mutex);
+ g_free(di);
- // Avoid race between block jobs and backup-cancel command:
- if (!(backup_state.vmaw || backup_state.pbs)) {
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- return;
- }
+ qemu_mutex_unlock(&backup_state.backup_mutex);
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- if (!backup_state.stat.error) {
- qemu_co_rwlock_upgrade(&backup_state.stat.rwlock);
- error_setg(&backup_state.stat.error, "backup cancelled");
- }
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ pvebackup_run_next_job();
+}
+
+static void pvebackup_cancel(void)
+{
+ assert(!qemu_in_coroutine());
+
+ Error *cancel_err = NULL;
+ error_setg(&cancel_err, "backup canceled");
+ pvebackup_propagate_error(cancel_err);
+
+ qemu_mutex_lock(&backup_state.backup_mutex);
if (backup_state.vmaw) {
/* make sure vma writer does not block anymore */
- vma_writer_set_error(backup_state.vmaw, "backup cancelled");
+ vma_writer_set_error(backup_state.vmaw, "backup canceled");
}
if (backup_state.pbs) {
- proxmox_backup_abort(backup_state.pbs, "backup cancelled");
+ proxmox_backup_abort(backup_state.pbs, "backup canceled");
}
- bool running_jobs = 0;
- GList *l = backup_state.di_list;
- while (l) {
- PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
- l = g_list_next(l);
- if (!di->completed && di->bs) {
- for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
- if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
- continue;
- }
+ qemu_mutex_unlock(&backup_state.backup_mutex);
- BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
- if (bjob && bjob->source_bs == di->bs) {
- AioContext *aio_context = job->job.aio_context;
- aio_context_acquire(aio_context);
+ for(;;) {
- if (!di->completed) {
- running_jobs += 1;
- job_cancel(&job->job, false);
- }
- aio_context_release(aio_context);
- }
+ BlockJob *next_job = NULL;
+
+ qemu_mutex_lock(&backup_state.backup_mutex);
+
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ BlockJob *job = lookup_active_block_job(di);
+ if (job != NULL) {
+ next_job = job;
+ break;
}
}
- }
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
+ qemu_mutex_unlock(&backup_state.backup_mutex);
- if (running_jobs == 0) pvebackup_co_cleanup(); // else job will call completion handler
+ if (next_job) {
+ AioContext *aio_context = next_job->job.aio_context;
+ aio_context_acquire(aio_context);
+ job_cancel_sync(&next_job->job);
+ aio_context_release(aio_context);
+ } else {
+ break;
+ }
+ }
}
void qmp_backup_cancel(Error **errp)
{
- block_on_coroutine_fn(pvebackup_co_cancel, NULL);
+ pvebackup_cancel();
}
+// assumes the caller holds backup_mutex
static int coroutine_fn pvebackup_co_add_config(
const char *file,
const char *name,
@@ -406,46 +419,97 @@ static int coroutine_fn pvebackup_co_add_config(
bool job_should_pause(Job *job);
-static void coroutine_fn pvebackup_co_run_next_job(void)
+static void pvebackup_run_next_job(void)
{
- assert(qemu_in_coroutine());
+ assert(!qemu_in_coroutine());
- qemu_co_mutex_lock(&backup_state.backup_mutex);
+ qemu_mutex_lock(&backup_state.backup_mutex);
GList *l = backup_state.di_list;
while (l) {
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
l = g_list_next(l);
- if (!di->completed && di->bs) {
- for (BlockJob *job = block_job_next(NULL); job; job = block_job_next(job)) {
- if (job->job.driver->job_type != JOB_TYPE_BACKUP) {
- continue;
- }
- BackupBlockJob *bjob = container_of(job, BackupBlockJob, common);
- if (bjob && bjob->source_bs == di->bs) {
- AioContext *aio_context = job->job.aio_context;
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- aio_context_acquire(aio_context);
-
- if (job_should_pause(&job->job)) {
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- bool error_or_canceled = backup_state.stat.error || backup_state.stat.cancel;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
-
- if (error_or_canceled) {
- job_cancel(&job->job, false);
- } else {
- job_resume(&job->job);
- }
- }
- aio_context_release(aio_context);
- return;
+ BlockJob *job = lookup_active_block_job(di);
+
+ if (job) {
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+
+ AioContext *aio_context = job->job.aio_context;
+ aio_context_acquire(aio_context);
+
+ if (job_should_pause(&job->job)) {
+ bool error_or_canceled = pvebackup_error_or_canceled();
+ if (error_or_canceled) {
+ job_cancel_sync(&job->job);
+ } else {
+ job_resume(&job->job);
}
}
+ aio_context_release(aio_context);
+ return;
+ }
+ }
+
+ block_on_coroutine_fn(pvebackup_co_cleanup, NULL); // no more jobs, run cleanup
+
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+}
+
+static bool create_backup_jobs(void) {
+
+ assert(!qemu_in_coroutine());
+
+ Error *local_err = NULL;
+
+ /* create and start all jobs (paused state) */
+ GList *l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ assert(di->target != NULL);
+
+ AioContext *aio_context = bdrv_get_aio_context(di->bs);
+ aio_context_acquire(aio_context);
+
+ BlockJob *job = backup_job_create(
+ NULL, di->bs, di->target, backup_state.speed, MIRROR_SYNC_MODE_FULL, NULL,
+ BITMAP_SYNC_MODE_NEVER, false, NULL, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
+ JOB_DEFAULT, pvebackup_complete_cb, di, 1, NULL, &local_err);
+
+ aio_context_release(aio_context);
+
+ if (!job || local_err != NULL) {
+ Error *create_job_err = NULL;
+ error_setg(&create_job_err, "backup_job_create failed: %s",
+ local_err ? error_get_pretty(local_err) : "null");
+
+ pvebackup_propagate_error(create_job_err);
+ break;
+ }
+ job_start(&job->job);
+
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
+
+ bool errors = pvebackup_error_or_canceled();
+
+ if (errors) {
+ l = backup_state.di_list;
+ while (l) {
+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
+ l = g_list_next(l);
+
+ if (di->target) {
+ bdrv_unref(di->target);
+ di->target = NULL;
+ }
}
}
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ return errors;
}
typedef struct QmpBackupTask {
@@ -476,7 +540,8 @@ typedef struct QmpBackupTask {
UuidInfo *result;
} QmpBackupTask;
-static void coroutine_fn pvebackup_co_start(void *opaque)
+// assumes the caller holds backup_mutex
+static void coroutine_fn pvebackup_co_prepare(void *opaque)
{
assert(qemu_in_coroutine());
@@ -495,16 +560,12 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
GList *di_list = NULL;
GList *l;
UuidInfo *uuid_info;
- BlockJob *job;
const char *config_name = "qemu-server.conf";
const char *firewall_name = "qemu-server.fw";
- qemu_co_mutex_lock(&backup_state.backup_mutex);
-
if (backup_state.di_list) {
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
- error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
+ error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
"previous backup not finished");
return;
}
@@ -627,7 +688,7 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
if (dev_id < 0)
goto err;
- if (!(di->target = bdrv_backup_dump_create(dump_cb_block_size, di->size, pvebackup_co_dump_cb, di, task->errp))) {
+ if (!(di->target = bdrv_backup_dump_create(dump_cb_block_size, di->size, pvebackup_co_dump_pbs_cb, di, task->errp))) {
goto err;
}
@@ -648,7 +709,7 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
l = g_list_next(l);
- if (!(di->target = bdrv_backup_dump_create(VMA_CLUSTER_SIZE, di->size, pvebackup_co_dump_cb, di, task->errp))) {
+ if (!(di->target = bdrv_backup_dump_create(VMA_CLUSTER_SIZE, di->size, pvebackup_co_dump_vma_cb, di, task->errp))) {
goto err;
}
@@ -713,9 +774,7 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
}
/* initialize global backup_state now */
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
-
- backup_state.stat.cancel = false;
+ qemu_mutex_lock(&backup_state.stat.lock);
if (backup_state.stat.error) {
error_free(backup_state.stat.error);
@@ -738,7 +797,7 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
backup_state.stat.transferred = 0;
backup_state.stat.zero_bytes = 0;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
+ qemu_mutex_unlock(&backup_state.stat.lock);
backup_state.speed = (task->has_speed && task->speed > 0) ? task->speed : 0;
@@ -747,48 +806,6 @@ static void coroutine_fn pvebackup_co_start(void *opaque)
backup_state.di_list = di_list;
- /* start all jobs (paused state) */
- l = di_list;
- while (l) {
- PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
- l = g_list_next(l);
-
- // make sure target runs in same aio_context as source
- AioContext *aio_context = bdrv_get_aio_context(di->bs);
- aio_context_acquire(aio_context);
- GSList *ignore = NULL;
- bdrv_set_aio_context_ignore(di->target, aio_context, &ignore);
- g_slist_free(ignore);
- aio_context_release(aio_context);
-
- job = backup_job_create(NULL, di->bs, di->target, backup_state.speed, MIRROR_SYNC_MODE_FULL, NULL,
- BITMAP_SYNC_MODE_NEVER, false, NULL, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
- JOB_DEFAULT, pvebackup_complete_cb, di, 1, NULL, &local_err);
- if (!job || local_err != NULL) {
- qemu_co_rwlock_wrlock(&backup_state.stat.rwlock);
- error_setg(&backup_state.stat.error, "backup_job_create failed");
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- break;
- }
- job_start(&job->job);
- if (di->target) {
- bdrv_unref(di->target);
- di->target = NULL;
- }
- }
-
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
-
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
- bool no_errors = !backup_state.stat.error;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
-
- if (no_errors) {
- pvebackup_co_run_next_job(); // run one job
- } else {
- pvebackup_co_cancel(NULL);
- }
-
uuid_info = g_malloc0(sizeof(*uuid_info));
uuid_info->UUID = uuid_str;
@@ -831,8 +848,6 @@ err:
rmdir(backup_dir);
}
- qemu_co_mutex_unlock(&backup_state.backup_mutex);
-
task->result = NULL;
return;
}
@@ -876,32 +891,31 @@ UuidInfo *qmp_backup(
.errp = errp,
};
- block_on_coroutine_fn(pvebackup_co_start, &task);
+ qemu_mutex_lock(&backup_state.backup_mutex);
- return task.result;
-}
+ block_on_coroutine_fn(pvebackup_co_prepare, &task);
+ if (*errp == NULL) {
+ create_backup_jobs();
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+ pvebackup_run_next_job();
+ } else {
+ qemu_mutex_unlock(&backup_state.backup_mutex);
+ }
-typedef struct QmpQueryBackupTask {
- Error **errp;
- BackupStatus *result;
-} QmpQueryBackupTask;
+ return task.result;
+}
-static void coroutine_fn pvebackup_co_query(void *opaque)
+BackupStatus *qmp_query_backup(Error **errp)
{
- assert(qemu_in_coroutine());
-
- QmpQueryBackupTask *task = opaque;
-
BackupStatus *info = g_malloc0(sizeof(*info));
- qemu_co_rwlock_rdlock(&backup_state.stat.rwlock);
+ qemu_mutex_lock(&backup_state.stat.lock);
if (!backup_state.stat.start_time) {
/* not started, return {} */
- task->result = info;
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
- return;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+ return info;
}
info->has_status = true;
@@ -937,19 +951,7 @@ static void coroutine_fn pvebackup_co_query(void *opaque)
info->has_transferred = true;
info->transferred = backup_state.stat.transferred;
- task->result = info;
+ qemu_mutex_unlock(&backup_state.stat.lock);
- qemu_co_rwlock_unlock(&backup_state.stat.rwlock);
-}
-
-BackupStatus *qmp_query_backup(Error **errp)
-{
- QmpQueryBackupTask task = {
- .errp = errp,
- .result = NULL,
- };
-
- block_on_coroutine_fn(pvebackup_co_query, &task);
-
- return task.result;
+ return info;
}


@@ -20,13 +20,13 @@ Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
block/monitor/block-hmp-cmds.c | 1 +
monitor/hmp-cmds.c | 45 ++++++++++++----
proxmox-backup-client.c | 3 +-
proxmox-backup-client.h | 1 +
pve-backup.c | 95 ++++++++++++++++++++++++++++++----
qapi/block-core.json | 12 ++++-
6 files changed, 134 insertions(+), 23 deletions(-)
block/monitor/block-hmp-cmds.c | 1 +
monitor/hmp-cmds.c | 45 ++++++++++----
proxmox-backup-client.c | 3 +-
proxmox-backup-client.h | 1 +
pve-backup.c | 103 ++++++++++++++++++++++++++++++---
qapi/block-core.json | 12 +++-
6 files changed, 142 insertions(+), 23 deletions(-)
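The dirty and reused counters introduced below are related by simple arithmetic: whatever is not dirty can be reused from the previous PBS snapshot. A worked example with illustrative numbers:

    size_t total  = 100ULL << 30;   /* 100 GiB disk */
    size_t dirty  =   2ULL << 30;   /* 2 GiB changed since the last backup */
    size_t reused = dirty >= total ? 0 : total - dirty;   /* 98 GiB reused */

For VMA and directory backups every byte counts as dirty, so reused works out to 0.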
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
index 9ba7c774a2..056d14deee 100644
@@ -132,7 +132,7 @@ index 1dda8b7d8f..8cbf645b2c 100644
diff --git a/pve-backup.c b/pve-backup.c
index d40f3f2fd6..d50f03a050 100644
index d40f3f2fd6..1cd9d31d7c 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -28,6 +28,8 @@
@@ -257,8 +257,8 @@ index d40f3f2fd6..d50f03a050 100644
const char *fingerprint;
bool has_fingerprint;
int64_t backup_time;
+ bool has_incremental;
+ bool incremental;
+ bool has_use_dirty_bitmap;
+ bool use_dirty_bitmap;
bool has_format;
BackupFormat format;
bool has_config_file;
@@ -274,7 +274,7 @@ index d40f3f2fd6..d50f03a050 100644
int dump_cb_block_size = PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE; // Hardcoded (4M)
firewall_name = "fw.conf";
+ bool incremental = task->has_incremental && task->incremental;
+ bool use_dirty_bitmap = task->has_use_dirty_bitmap && task->use_dirty_bitmap;
+
char *pbs_err = NULL;
pbs = proxmox_backup_new(
@@ -289,42 +289,50 @@ index d40f3f2fd6..d50f03a050 100644
goto err;
/* register all devices */
@@ -684,9 +721,32 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
@@ -684,9 +721,40 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
const char *devname = bdrv_get_device_name(di->bs);
- int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, task->errp);
- if (dev_id < 0)
+ BdrvDirtyBitmap *bitmap = bdrv_find_dirty_bitmap(di->bs, PBS_BITMAP_NAME);
+ bool expect_only_dirty = false;
+
+ bool use_incremental = false;
+ if (incremental) {
+ if (use_dirty_bitmap) {
+ if (bitmap == NULL) {
+ bitmap = bdrv_create_dirty_bitmap(di->bs, dump_cb_block_size, PBS_BITMAP_NAME, task->errp);
+ if (!bitmap) {
+ goto err;
+ }
+ /* mark entire bitmap as dirty to make full backup first */
+ } else {
+ expect_only_dirty = proxmox_backup_check_incremental(pbs, devname, di->size) != 0;
+ }
+
+ if (expect_only_dirty) {
+ dirty += bdrv_get_dirty_count(bitmap);
+ } else {
+ /* mark entire bitmap as dirty to make full backup */
+ bdrv_set_dirty_bitmap(bitmap, 0, di->size);
+ dirty += di->size;
+ } else {
+ use_incremental = true;
+ dirty += bdrv_get_dirty_count(bitmap);
+ }
+ di->bitmap = bitmap;
+ } else if (bitmap != NULL) {
+ } else {
+ dirty += di->size;
+ bdrv_release_dirty_bitmap(bitmap);
+
+ /* after a full backup the old dirty bitmap is invalid anyway */
+ if (bitmap != NULL) {
+ bdrv_release_dirty_bitmap(bitmap);
+ }
+ }
+
+ int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, use_incremental, task->errp);
+ int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, expect_only_dirty, task->errp);
+ if (dev_id < 0) {
goto err;
+ }
if (!(di->target = bdrv_backup_dump_create(dump_cb_block_size, di->size, pvebackup_co_dump_pbs_cb, di, task->errp))) {
goto err;
-@@ -695,6 +755,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
+@@ -695,6 +763,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
di->dev_id = dev_id;
}
} else if (format == BACKUP_FORMAT_VMA) {
@ -333,7 +341,7 @@ index d40f3f2fd6..d50f03a050 100644
vmaw = vma_writer_create(task->backup_file, uuid, &local_err);
if (!vmaw) {
if (local_err) {
-@@ -722,6 +784,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
+@@ -722,6 +792,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
}
}
} else if (format == BACKUP_FORMAT_DIR) {
@ -342,18 +350,18 @@ index d40f3f2fd6..d50f03a050 100644
if (mkdir(task->backup_file, 0640) != 0) {
error_setg_errno(task->errp, errno, "can't create directory '%s'\n",
task->backup_file);
-@@ -794,8 +858,10 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
+@@ -794,8 +866,10 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
char *uuid_str = g_strdup(backup_state.stat.uuid_str);
backup_state.stat.total = total;
+ backup_state.stat.dirty = dirty;
backup_state.stat.transferred = 0;
backup_state.stat.zero_bytes = 0;
+ backup_state.stat.reused = dirty >= total ? 0 : total - dirty;
+ backup_state.stat.reused = format == BACKUP_FORMAT_PBS && dirty >= total ? 0 : total - dirty;
qemu_mutex_unlock(&backup_state.stat.lock);
-@@ -819,6 +885,10 @@ err:
+@@ -819,6 +893,10 @@ err:
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
l = g_list_next(l);
@ -364,24 +372,24 @@ index d40f3f2fd6..d50f03a050 100644
if (di->target) {
bdrv_unref(di->target);
}
-@@ -860,6 +930,7 @@ UuidInfo *qmp_backup(
+@@ -860,6 +938,7 @@ UuidInfo *qmp_backup(
bool has_fingerprint, const char *fingerprint,
bool has_backup_id, const char *backup_id,
bool has_backup_time, int64_t backup_time,
+ bool has_incremental, bool incremental,
+ bool has_use_dirty_bitmap, bool use_dirty_bitmap,
bool has_format, BackupFormat format,
bool has_config_file, const char *config_file,
bool has_firewall_file, const char *firewall_file,
-@@ -878,6 +949,8 @@ UuidInfo *qmp_backup(
+@@ -878,6 +957,8 @@ UuidInfo *qmp_backup(
.backup_id = backup_id,
.has_backup_time = has_backup_time,
.backup_time = backup_time,
+ .has_incremental = has_incremental,
+ .incremental = incremental,
+ .has_use_dirty_bitmap = has_use_dirty_bitmap,
+ .use_dirty_bitmap = use_dirty_bitmap,
.has_format = has_format,
.format = format,
.has_config_file = has_config_file,
-@@ -946,10 +1019,14 @@ BackupStatus *qmp_query_backup(Error **errp)
+@@ -946,10 +1027,14 @@ BackupStatus *qmp_query_backup(Error **errp)
info->has_total = true;
info->total = backup_state.stat.total;
@ -397,14 +405,14 @@ index d40f3f2fd6..d50f03a050 100644
qemu_mutex_unlock(&backup_state.stat.lock);
diff --git a/qapi/block-core.json b/qapi/block-core.json
-index 9054db608c..aadd5329b3 100644
+index 9054db608c..d4e1c98c50 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -758,8 +758,13 @@
#
# @total: total amount of bytes involved in the backup process
#
+# @dirty: with incremental mode, this is the amount of bytes involved
+# @dirty: with incremental mode (PBS) this is the amount of bytes involved
+# in the backup process which are marked dirty.
+#
# @transferred: amount of bytes already backed up.
@ -429,7 +437,7 @@ index 9054db608c..aadd5329b3 100644
#
# @backup-time: backup timestamp (Unix epoch, required for format 'pbs')
#
+# @incremental: sync incremental changes since last job (optional for format 'pbs')
+# @use-dirty-bitmap: use dirty bitmap to detect incremental changes since last job (optional for format 'pbs')
+#
# Returns: the uuid of the backup job
#
@ -438,7 +446,7 @@ index 9054db608c..aadd5329b3 100644
'*fingerprint': 'str',
'*backup-id': 'str',
'*backup-time': 'int',
+ '*incremental': 'bool',
+ '*use-dirty-bitmap': 'bool',
'*format': 'BackupFormat',
'*config-file': 'str',
'*firewall-file': 'str',
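
The branch structure above is dense; here is a minimal standalone sketch of the same decision (plain C, not QEMU code: Bitmap, bitmap_new() and server_knows_device() are hypothetical stand-ins for BdrvDirtyBitmap, bdrv_create_dirty_bitmap() and proxmox_backup_check_incremental()):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct Bitmap { uint64_t dirty; uint64_t size; } Bitmap;

static Bitmap *bitmap_new(uint64_t size)
{
    Bitmap *b = calloc(1, sizeof(*b)); /* fresh bitmap: nothing marked yet */
    b->size = size;
    return b;
}

static bool server_knows_device(void) { return false; } /* stand-in */

/* Returns the number of bytes this device contributes to the 'dirty' total. */
static uint64_t plan_device(Bitmap **bmp, uint64_t dev_size, bool use_dirty_bitmap)
{
    Bitmap *bitmap = *bmp;
    if (use_dirty_bitmap) {
        bool expect_only_dirty = false;
        if (bitmap == NULL) {
            bitmap = bitmap_new(dev_size); /* first backup of this device */
        } else {
            /* trust the bitmap only if the server still has the last backup */
            expect_only_dirty = server_knows_device();
        }
        if (!expect_only_dirty) {
            bitmap->dirty = bitmap->size; /* mark everything: full backup */
        }
        *bmp = bitmap;
        return bitmap->dirty;
    }
    /* no bitmap requested: full backup; a stale bitmap would be invalid */
    free(bitmap);
    *bmp = NULL;
    return dev_size;
}

int main(void)
{
    Bitmap *bm = NULL;
    uint64_t dirty = plan_device(&bm, 1 << 20, true); /* full run: 1 MiB dirty */
    dirty = plan_device(&bm, 1 << 20, true);          /* next run */
    free(bm);
    return dirty == 0;
}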


@ -0,0 +1,218 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Thu, 9 Jul 2020 12:53:08 +0200
Subject: [PATCH] PVE: various PBS fixes
pbs: fix crypt and compress parameters
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
PVE: handle PBS write callback with big blocks correctly
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
PVE: add zero block handling to PBS dump callback
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
block/monitor/block-hmp-cmds.c | 4 ++-
pve-backup.c | 57 +++++++++++++++++++++++++++-------
qapi/block-core.json | 6 ++++
3 files changed, 54 insertions(+), 13 deletions(-)
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
index 056d14deee..46c63b1cf9 100644
--- a/block/monitor/block-hmp-cmds.c
+++ b/block/monitor/block-hmp-cmds.c
@@ -1039,7 +1039,9 @@ void hmp_backup(Monitor *mon, const QDict *qdict)
false, NULL, // PBS fingerprint
false, NULL, // PBS backup-id
false, 0, // PBS backup-time
- false, false, // PBS incremental
+ false, false, // PBS use-dirty-bitmap
+ false, false, // PBS compress
+ false, false, // PBS encrypt
true, dir ? BACKUP_FORMAT_DIR : BACKUP_FORMAT_VMA,
false, NULL, false, NULL, !!devlist,
devlist, qdict_haskey(qdict, "speed"), speed, &error);
diff --git a/pve-backup.c b/pve-backup.c
index 1cd9d31d7c..b8182aaf89 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -8,6 +8,7 @@
#include "block/blockjob.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qmp/qerror.h"
+#include "qemu/cutils.h"
/* PVE backup state and related function */
@@ -67,6 +68,7 @@ opts_init(pvebackup_init);
typedef struct PVEBackupDevInfo {
BlockDriverState *bs;
size_t size;
+ uint64_t block_size;
uint8_t dev_id;
bool completed;
char targetfile[PATH_MAX];
@@ -135,10 +137,13 @@ pvebackup_co_dump_pbs_cb(
PVEBackupDevInfo *di = opaque;
assert(backup_state.pbs);
+ assert(buf);
Error *local_err = NULL;
int pbs_res = -1;
+ bool is_zero_block = size == di->block_size && buffer_is_zero(buf, size);
+
qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
// avoid deadlock if job is cancelled
@@ -147,17 +152,29 @@ pvebackup_co_dump_pbs_cb(
return -1;
}
- pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
- qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ uint64_t transferred = 0;
+ uint64_t reused = 0;
+ while (transferred < size) {
+ uint64_t left = size - transferred;
+ uint64_t to_transfer = left < di->block_size ? left : di->block_size;
- if (pbs_res < 0) {
- pvebackup_propagate_error(local_err);
- return pbs_res;
- } else {
- size_t reused = (pbs_res == 0) ? size : 0;
- pvebackup_add_transfered_bytes(size, !buf ? size : 0, reused);
+ pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id,
+ is_zero_block ? NULL : buf + transferred, start + transferred,
+ to_transfer, &local_err);
+ transferred += to_transfer;
+
+ if (pbs_res < 0) {
+ pvebackup_propagate_error(local_err);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return pbs_res;
+ }
+
+ reused += pbs_res == 0 ? to_transfer : 0;
}
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ pvebackup_add_transfered_bytes(size, is_zero_block ? size : 0, reused);
+
return size;
}
@@ -178,6 +195,7 @@ pvebackup_co_dump_vma_cb(
int ret = -1;
assert(backup_state.vmaw);
+ assert(buf);
uint64_t remaining = size;
@@ -204,9 +222,7 @@ pvebackup_co_dump_vma_cb(
qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
++cluster_num;
- if (buf) {
- buf += VMA_CLUSTER_SIZE;
- }
+ buf += VMA_CLUSTER_SIZE;
if (ret < 0) {
Error *local_err = NULL;
vma_writer_error_propagate(backup_state.vmaw, &local_err);
@@ -567,6 +583,10 @@ typedef struct QmpBackupTask {
const char *firewall_file;
bool has_devlist;
const char *devlist;
+ bool has_compress;
+ bool compress;
+ bool has_encrypt;
+ bool encrypt;
bool has_speed;
int64_t speed;
Error **errp;
@@ -690,6 +710,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
bool use_dirty_bitmap = task->has_use_dirty_bitmap && task->use_dirty_bitmap;
+
char *pbs_err = NULL;
pbs = proxmox_backup_new(
task->backup_file,
@@ -699,8 +720,10 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
task->has_password ? task->password : NULL,
task->has_keyfile ? task->keyfile : NULL,
task->has_key_password ? task->key_password : NULL,
+ task->has_compress ? task->compress : true,
+ task->has_encrypt ? task->encrypt : task->has_keyfile,
task->has_fingerprint ? task->fingerprint : NULL,
- &pbs_err);
+ &pbs_err);
if (!pbs) {
error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
@@ -719,6 +742,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
l = g_list_next(l);
+ di->block_size = dump_cb_block_size;
+
const char *devname = bdrv_get_device_name(di->bs);
BdrvDirtyBitmap *bitmap = bdrv_find_dirty_bitmap(di->bs, PBS_BITMAP_NAME);
@@ -939,6 +964,8 @@ UuidInfo *qmp_backup(
bool has_backup_id, const char *backup_id,
bool has_backup_time, int64_t backup_time,
bool has_use_dirty_bitmap, bool use_dirty_bitmap,
+ bool has_compress, bool compress,
+ bool has_encrypt, bool encrypt,
bool has_format, BackupFormat format,
bool has_config_file, const char *config_file,
bool has_firewall_file, const char *firewall_file,
@@ -949,6 +976,8 @@ UuidInfo *qmp_backup(
.backup_file = backup_file,
.has_password = has_password,
.password = password,
+ .has_keyfile = has_keyfile,
+ .keyfile = keyfile,
.has_key_password = has_key_password,
.key_password = key_password,
.has_fingerprint = has_fingerprint,
@@ -959,6 +988,10 @@ UuidInfo *qmp_backup(
.backup_time = backup_time,
.has_use_dirty_bitmap = has_use_dirty_bitmap,
.use_dirty_bitmap = use_dirty_bitmap,
+ .has_compress = has_compress,
+ .compress = compress,
+ .has_encrypt = has_encrypt,
+ .encrypt = encrypt,
.has_format = has_format,
.format = format,
.has_config_file = has_config_file,
diff --git a/qapi/block-core.json b/qapi/block-core.json
index d4e1c98c50..0fda1e3fd3 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -823,6 +823,10 @@
#
# @use-dirty-bitmap: use dirty bitmap to detect incremental changes since last job (optional for format 'pbs')
#
+# @compress: use compression (optional for format 'pbs', defaults to true)
+#
+# @encrypt: use encryption (optional for format 'pbs', defaults to true if there is a keyfile)
+#
# Returns: the uuid of the backup job
#
##
@@ -834,6 +838,8 @@
'*backup-id': 'str',
'*backup-time': 'int',
'*use-dirty-bitmap': 'bool',
+ '*compress': 'bool',
+ '*encrypt': 'bool',
'*format': 'BackupFormat',
'*config-file': 'str',
'*firewall-file': 'str',
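
To make the control flow of the callback easier to follow, a standalone sketch of the chunking and zero-block handling (plain C; write_block() is a hypothetical stand-in for proxmox_backup_co_write_data(), returning 0 when a chunk was reused; locking omitted):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for proxmox_backup_co_write_data(): NULL buf means "all zero";
 * returns 0 when the chunk was reused on the server, 1 when written. */
static int write_block(const uint8_t *buf, uint64_t start, uint64_t len)
{
    printf("%s %llu bytes at %llu\n", buf ? "write" : "write-zero",
           (unsigned long long)len, (unsigned long long)start);
    return buf ? 1 : 0;
}

static int64_t dump_cb(const uint8_t *buf, uint64_t start, uint64_t size,
                       uint64_t block_size)
{
    /* zero detection, as above, only applies to exactly one full block */
    int is_zero_block = (size == block_size);
    for (uint64_t i = 0; is_zero_block && i < size; i++) {
        is_zero_block = (buf[i] == 0);
    }

    /* QEMU may hand over more than one block at once: split and iterate */
    uint64_t transferred = 0, reused = 0;
    while (transferred < size) {
        uint64_t left = size - transferred;
        uint64_t to_transfer = left < block_size ? left : block_size;
        int res = write_block(is_zero_block ? NULL : buf + transferred,
                              start + transferred, to_transfer);
        if (res < 0) {
            return res; /* the real code also propagates a detailed error */
        }
        reused += (res == 0) ? to_transfer : 0;
        transferred += to_transfer;
    }
    printf("reused %llu of %llu bytes\n", (unsigned long long)reused,
           (unsigned long long)size);
    return size;
}

int main(void)
{
    uint8_t data[8192] = { 0 }; /* two 4 KiB blocks, both written as data */
    return dump_cb(data, 0, sizeof(data), 4096) < 0;
}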


@ -1,126 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date: Mon, 6 Jul 2020 20:05:16 +0200
Subject: [PATCH] PVE backup: rename incremental to use-dirty-bitmap
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pve-backup.c | 22 +++++++++++-----------
qapi/block-core.json | 6 +++---
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index d50f03a050..7bf54b4c5d 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -557,8 +557,8 @@ typedef struct QmpBackupTask {
const char *fingerprint;
bool has_fingerprint;
int64_t backup_time;
- bool has_incremental;
- bool incremental;
+ bool has_use_dirty_bitmap;
+ bool use_dirty_bitmap;
bool has_format;
BackupFormat format;
bool has_config_file;
@@ -688,7 +688,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
int dump_cb_block_size = PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE; // Hardcoded (4M)
firewall_name = "fw.conf";
- bool incremental = task->has_incremental && task->incremental;
+ bool use_dirty_bitmap = task->has_use_dirty_bitmap && task->use_dirty_bitmap;
char *pbs_err = NULL;
pbs = proxmox_backup_new(
@@ -722,9 +722,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
const char *devname = bdrv_get_device_name(di->bs);
BdrvDirtyBitmap *bitmap = bdrv_find_dirty_bitmap(di->bs, PBS_BITMAP_NAME);
+ bool expect_only_dirty = false;
- bool use_incremental = false;
- if (incremental) {
+ if (use_dirty_bitmap) {
if (bitmap == NULL) {
bitmap = bdrv_create_dirty_bitmap(di->bs, dump_cb_block_size, PBS_BITMAP_NAME, task->errp);
if (!bitmap) {
@@ -734,7 +734,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
bdrv_set_dirty_bitmap(bitmap, 0, di->size);
dirty += di->size;
} else {
- use_incremental = true;
+ expect_only_dirty = true;
dirty += bdrv_get_dirty_count(bitmap);
}
di->bitmap = bitmap;
@@ -743,7 +743,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
bdrv_release_dirty_bitmap(bitmap);
}
- int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, use_incremental, task->errp);
+ int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, expect_only_dirty, task->errp);
if (dev_id < 0) {
goto err;
}
@@ -861,7 +861,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
backup_state.stat.dirty = dirty;
backup_state.stat.transferred = 0;
backup_state.stat.zero_bytes = 0;
- backup_state.stat.reused = dirty >= total ? 0 : total - dirty;
+ backup_state.stat.reused = format == BACKUP_FORMAT_PBS && dirty >= total ? 0 : total - dirty;
qemu_mutex_unlock(&backup_state.stat.lock);
@@ -930,7 +930,7 @@ UuidInfo *qmp_backup(
bool has_fingerprint, const char *fingerprint,
bool has_backup_id, const char *backup_id,
bool has_backup_time, int64_t backup_time,
- bool has_incremental, bool incremental,
+ bool has_use_dirty_bitmap, bool use_dirty_bitmap,
bool has_format, BackupFormat format,
bool has_config_file, const char *config_file,
bool has_firewall_file, const char *firewall_file,
@@ -949,8 +949,8 @@ UuidInfo *qmp_backup(
.backup_id = backup_id,
.has_backup_time = has_backup_time,
.backup_time = backup_time,
- .has_incremental = has_incremental,
- .incremental = incremental,
+ .has_use_dirty_bitmap = has_use_dirty_bitmap,
+ .use_dirty_bitmap = use_dirty_bitmap,
.has_format = has_format,
.format = format,
.has_config_file = has_config_file,
diff --git a/qapi/block-core.json b/qapi/block-core.json
index aadd5329b3..d4e1c98c50 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -758,7 +758,7 @@
#
# @total: total amount of bytes involved in the backup process
#
-# @dirty: with incremental mode, this is the amount of bytes involved
+# @dirty: with incremental mode (PBS) this is the amount of bytes involved
# in the backup process which are marked dirty.
#
# @transferred: amount of bytes already backed up.
@@ -821,7 +821,7 @@
#
# @backup-time: backup timestamp (Unix epoch, required for format 'pbs')
#
-# @incremental: sync incremental changes since last job (optional for format 'pbs')
+# @use-dirty-bitmap: use dirty bitmap to detect incremental changes since last job (optional for format 'pbs')
#
# Returns: the uuid of the backup job
#
@@ -833,7 +833,7 @@
'*fingerprint': 'str',
'*backup-id': 'str',
'*backup-time': 'int',
- '*incremental': 'bool',
+ '*use-dirty-bitmap': 'bool',
'*format': 'BackupFormat',
'*config-file': 'str',
'*firewall-file': 'str',


@ -16,10 +16,10 @@ Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2 files changed, 33 insertions(+)
diff --git a/pve-backup.c b/pve-backup.c
-index bfb648d6b5..ba9d0d8a86 100644
+index b8182aaf89..40c2697b37 100644
--- a/pve-backup.c
+++ b/pve-backup.c
-@@ -1051,3 +1051,11 @@ BackupStatus *qmp_query_backup(Error **errp)
+@@ -1073,3 +1073,11 @@ BackupStatus *qmp_query_backup(Error **errp)
return info;
}


@ -1,44 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 6 Jul 2020 14:40:12 +0200
Subject: [PATCH] PVE: fixup pbs-restore API
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pbs-restore.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/pbs-restore.c b/pbs-restore.c
index d4daee7e91..4d3f925a1b 100644
--- a/pbs-restore.c
+++ b/pbs-restore.c
@@ -161,13 +161,19 @@ int main(int argc, char **argv)
fprintf(stderr, "connecting to repository '%s'\n", repository);
}
char *pbs_error = NULL;
- ProxmoxRestoreHandle *conn = proxmox_restore_connect(
+ ProxmoxRestoreHandle *conn = proxmox_restore_new(
repository, snapshot, password, keyfile, key_password, fingerprint, &pbs_error);
if (conn == NULL) {
fprintf(stderr, "restore failed: %s\n", pbs_error);
return -1;
}
+ int res = proxmox_restore_connect(conn, &pbs_error);
+ if (res < 0 || pbs_error) {
+ fprintf(stderr, "restore failed (connection error): %s\n", pbs_error);
+ return -1;
+ }
+
QDict *options = qdict_new();
if (format) {
@@ -198,7 +204,7 @@ int main(int argc, char **argv)
fprintf(stderr, "starting to restore snapshot '%s'\n", snapshot);
fflush(stderr); // ensure we do not get printed after the progress log
}
- int res = proxmox_restore_image(
+ res = proxmox_restore_image(
conn,
archive_name,
write_callback,


@ -1,30 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 6 Jul 2020 14:40:13 +0200
Subject: [PATCH] PVE: always set dirty counter for non-incremental backups
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pve-backup.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index 7bf54b4c5d..1f2a0bbe8c 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -738,9 +738,13 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
dirty += bdrv_get_dirty_count(bitmap);
}
di->bitmap = bitmap;
- } else if (bitmap != NULL) {
+ } else {
dirty += di->size;
- bdrv_release_dirty_bitmap(bitmap);
+
+ /* after a full backup the old dirty bitmap is invalid anyway */
+ if (bitmap != NULL) {
+ bdrv_release_dirty_bitmap(bitmap);
+ }
}
int dev_id = proxmox_backup_co_register_image(pbs, devname, di->size, expect_only_dirty, task->errp);


@ -1,36 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 6 Jul 2020 14:40:14 +0200
Subject: [PATCH] PVE: use proxmox_backup_check_incremental
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
pve-backup.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index 1f2a0bbe8c..1cd9d31d7c 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -730,12 +730,16 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
if (!bitmap) {
goto err;
}
- /* mark entire bitmap as dirty to make full backup first */
- bdrv_set_dirty_bitmap(bitmap, 0, di->size);
- dirty += di->size;
} else {
- expect_only_dirty = true;
+ expect_only_dirty = proxmox_backup_check_incremental(pbs, devname, di->size) != 0;
+ }
+
+ if (expect_only_dirty) {
dirty += bdrv_get_dirty_count(bitmap);
+ } else {
+ /* mark entire bitmap as dirty to make full backup */
+ bdrv_set_dirty_bitmap(bitmap, 0, di->size);
+ dirty += di->size;
}
di->bitmap = bitmap;
} else {


@ -6,8 +6,8 @@ Subject: [PATCH] PVE: Add sequential job transaction support
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
include/qemu/job.h | 12 ++++++++++++
-job.c | 24 ++++++++++++++++++++++++
-2 files changed, 36 insertions(+)
+job.c | 31 +++++++++++++++++++++++++++++++
+2 files changed, 43 insertions(+)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 32aabb1c60..f7a6a0926a 100644
@ -33,7 +33,7 @@ index 32aabb1c60..f7a6a0926a 100644
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
diff --git a/job.c b/job.c
-index f9884e7d9d..8f06e05fbf 100644
+index f9884e7d9d..05b7797e82 100644
--- a/job.c
+++ b/job.c
@@ -72,6 +72,8 @@ struct JobTxn {
@ -81,3 +81,17 @@ index f9884e7d9d..8f06e05fbf 100644
return;
}
assert(other_job->ret == 0);
@@ -1011,6 +1035,13 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
return -EBUSY;
}
+ /* in a sequential transaction jobs with status CREATED can appear at time
+ * of cancelling, these have not begun work so job_enter won't do anything,
+ * let's ensure they are marked as ABORTING if required */
+ if (job->status == JOB_STATUS_CREATED && job->txn->sequential) {
+ job_update_rc(job);
+ }
+
AIO_WAIT_WHILE(job->aio_context,
(job_enter(job), !job_is_completed(job)));
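
The sequential mode boils down to: keep every job in the transaction parked, enter only the first one, and let each completion kick off the next. A standalone sketch of that idea (plain C, not the QEMU job API; txn_start_seq() and txn_job_completed() are hypothetical stand-ins for job_txn_start_seq() and the transaction's completion path):

#include <stdio.h>

typedef struct Job {
    const char *name;
} Job;

typedef struct JobTxn {
    Job *jobs;
    int count;
    int next;        /* index of the next job to start */
    int sequential;  /* mirrors the flag this patch adds to struct JobTxn */
} JobTxn;

static void job_start(JobTxn *txn)
{
    printf("starting job %s\n", txn->jobs[txn->next].name);
    txn->next++;
}

/* analogous to job_txn_start_seq(): enter only the first job */
static void txn_start_seq(JobTxn *txn)
{
    txn->sequential = 1;
    if (txn->next < txn->count) {
        job_start(txn);
    }
}

/* analogous to the completion hook: a finished job kicks off the next one */
static void txn_job_completed(JobTxn *txn)
{
    if (txn->sequential && txn->next < txn->count) {
        job_start(txn);
    }
}

int main(void)
{
    Job jobs[] = { {"backup-drive-scsi0"}, {"backup-drive-scsi1"} };
    JobTxn txn = { jobs, 2, 0, 0 };
    txn_start_seq(&txn);     /* only drive-scsi0 runs now */
    txn_job_completed(&txn); /* scsi0 finished -> scsi1 starts */
    txn_job_completed(&txn); /* scsi1 finished -> nothing left */
    return 0;
}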


@ -1,103 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Thu, 9 Jul 2020 12:53:08 +0200
Subject: [PATCH] PVE: fixup pbs backup, add compress and encrypt options
---
block/monitor/block-hmp-cmds.c | 4 +++-
pve-backup.c | 13 ++++++++++++-
qapi/block-core.json | 6 ++++++
3 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
index 056d14deee..46c63b1cf9 100644
--- a/block/monitor/block-hmp-cmds.c
+++ b/block/monitor/block-hmp-cmds.c
@@ -1039,7 +1039,9 @@ void hmp_backup(Monitor *mon, const QDict *qdict)
false, NULL, // PBS fingerprint
false, NULL, // PBS backup-id
false, 0, // PBS backup-time
- false, false, // PBS incremental
+ false, false, // PBS use-dirty-bitmap
+ false, false, // PBS compress
+ false, false, // PBS encrypt
true, dir ? BACKUP_FORMAT_DIR : BACKUP_FORMAT_VMA,
false, NULL, false, NULL, !!devlist,
devlist, qdict_haskey(qdict, "speed"), speed, &error);
diff --git a/pve-backup.c b/pve-backup.c
index 1cd9d31d7c..bfb648d6b5 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -567,6 +567,10 @@ typedef struct QmpBackupTask {
const char *firewall_file;
bool has_devlist;
const char *devlist;
+ bool has_compress;
+ bool compress;
+ bool has_encrypt;
+ bool encrypt;
bool has_speed;
int64_t speed;
Error **errp;
@@ -690,6 +694,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
bool use_dirty_bitmap = task->has_use_dirty_bitmap && task->use_dirty_bitmap;
+
char *pbs_err = NULL;
pbs = proxmox_backup_new(
task->backup_file,
@@ -699,8 +704,10 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
task->has_password ? task->password : NULL,
task->has_keyfile ? task->keyfile : NULL,
task->has_key_password ? task->key_password : NULL,
+ task->has_compress ? task->compress : true,
+ task->has_encrypt ? task->encrypt : task->has_keyfile,
task->has_fingerprint ? task->fingerprint : NULL,
- &pbs_err);
+ &pbs_err);
if (!pbs) {
error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
@@ -939,6 +946,8 @@ UuidInfo *qmp_backup(
bool has_backup_id, const char *backup_id,
bool has_backup_time, int64_t backup_time,
bool has_use_dirty_bitmap, bool use_dirty_bitmap,
+ bool has_compress, bool compress,
+ bool has_encrypt, bool encrypt,
bool has_format, BackupFormat format,
bool has_config_file, const char *config_file,
bool has_firewall_file, const char *firewall_file,
@@ -967,6 +976,8 @@ UuidInfo *qmp_backup(
.firewall_file = firewall_file,
.has_devlist = has_devlist,
.devlist = devlist,
+ .has_compress = has_compress,
+ .has_encrypt = has_encrypt,
.has_speed = has_speed,
.speed = speed,
.errp = errp,
diff --git a/qapi/block-core.json b/qapi/block-core.json
index d4e1c98c50..0fda1e3fd3 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -823,6 +823,10 @@
#
# @use-dirty-bitmap: use dirty bitmap to detect incremental changes since last job (optional for format 'pbs')
#
+# @compress: use compression (optional for format 'pbs', defaults to true)
+#
+# @encrypt: use encryption (optional for format 'pbs', defaults to true if there is a keyfile)
+#
# Returns: the uuid of the backup job
#
##
@@ -834,6 +838,8 @@
'*backup-id': 'str',
'*backup-time': 'int',
'*use-dirty-bitmap': 'bool',
+ '*compress': 'bool',
+ '*encrypt': 'bool',
'*format': 'BackupFormat',
'*config-file': 'str',
'*firewall-file': 'str',


@ -1,7 +1,8 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 28 Sep 2020 13:40:51 +0200
-Subject: [PATCH] PVE-Backup: Use more coroutines and don't block on finishing
+Subject: [PATCH] PVE-Backup: Don't block on finishing and cleanup
+ create_backup_jobs
proxmox_backup_co_finish is already async, but previously we would wait
for the coroutine using block_on_coroutine_fn(). Avoid this by
@ -29,16 +30,31 @@ To communicate the finishing state, a new property is introduced to
query-backup: 'finishing'. A new state is explicitly not used, since
that would break compatibility with older qemu-server versions.
+Also fix create_backup_jobs:
+No more weird bool returns, just the standard "errp" format used
+everywhere else too. With this, if backup_job_create fails, the error
+message is actually returned over QMP and can be shown to the user.
+To facilitate correct cleanup on such an error, we call
+create_backup_jobs as a bottom half directly from pvebackup_co_prepare.
+This additionally allows us to actually hold the backup_mutex during
+operation.
+Also add a job_cancel_sync before job_unref, since a job must be in
+STATUS_NULL to be deleted by unref, which could trigger an assert
+before.
[0] https://lists.gnu.org/archive/html/qemu-devel/2020-09/msg03515.html
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
-pve-backup.c | 148 ++++++++++++++++++++++++++-----------------
+pve-backup.c | 217 ++++++++++++++++++++++++++++---------------
qapi/block-core.json | 5 +-
-2 files changed, 95 insertions(+), 58 deletions(-)
+2 files changed, 144 insertions(+), 78 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
-index f3df43eb8c..f3b8ce1f3a 100644
+index f3df43eb8c..ded90cb822 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -33,7 +33,9 @@ const char *PBS_BITMAP_NAME = "pbs-incremental-dirty-bitmap";
@ -52,11 +68,12 @@ index f3df43eb8c..f3b8ce1f3a 100644
QemuMutex lock;
Error *error;
time_t start_time;
-@@ -47,20 +49,21 @@ static struct PVEBackupState {
+@@ -47,20 +49,22 @@ static struct PVEBackupState {
size_t reused;
size_t zero_bytes;
GList *bitmap_list;
+ bool finishing;
+ bool starting;
} stat;
int64_t speed;
VmaWriter *vmaw;
@ -76,7 +93,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
qemu_co_mutex_init(&backup_state.dump_callback_mutex);
}
-@@ -72,6 +75,7 @@ typedef struct PVEBackupDevInfo {
+@@ -72,6 +76,7 @@ typedef struct PVEBackupDevInfo {
size_t size;
uint64_t block_size;
uint8_t dev_id;
@ -84,7 +101,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
char targetfile[PATH_MAX];
BdrvDirtyBitmap *bitmap;
BlockDriverState *target;
-@@ -227,12 +231,12 @@ pvebackup_co_dump_vma_cb(
+@@ -227,12 +232,12 @@ pvebackup_co_dump_vma_cb(
}
// assumes the caller holds backup_mutex
@ -99,7 +116,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
qemu_mutex_unlock(&backup_state.stat.lock);
if (backup_state.vmaw) {
-@@ -261,12 +265,29 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused)
+@@ -261,35 +266,29 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused)
g_list_free(backup_state.di_list);
backup_state.di_list = NULL;
@ -116,25 +133,28 @@ index f3df43eb8c..f3b8ce1f3a 100644
{
PVEBackupDevInfo *di = opaque;
+ int ret = di->completed_ret;
+
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
+
+ if (ret < 0) {
+ Error *local_err = NULL;
+ error_setg(&local_err, "job failed with err %d - %s", ret, strerror(-ret));
+ pvebackup_propagate_error(local_err);
+ }
+
+ di->bs = NULL;
+
+ assert(di->target == NULL);
bool error_or_canceled = pvebackup_error_or_canceled();
@@ -281,27 +302,6 @@ static void coroutine_fn pvebackup_complete_stream(void *opaque)
pvebackup_propagate_error(local_err);
}
- bool error_or_canceled = pvebackup_error_or_canceled();
-
- if (backup_state.vmaw) {
- vma_writer_close_stream(backup_state.vmaw, di->dev_id);
+ qemu_mutex_lock(&backup_state.stat.lock);
+ bool starting = backup_state.stat.starting;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+ if (starting) {
+ /* in 'starting' state, no tasks have been run yet, meaning we can (and
+ * must) skip all cleanup, as we don't know what has and hasn't been
+ * initialized yet. */
+ return;
}
- if (backup_state.pbs && !error_or_canceled) {
- Error *local_err = NULL;
- proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
- if (local_err != NULL) {
- pvebackup_propagate_error(local_err);
- }
- }
-}
-
-static void pvebackup_complete_cb(void *opaque, int ret)
@ -144,22 +164,32 @@ index f3df43eb8c..f3b8ce1f3a 100644
- PVEBackupDevInfo *di = opaque;
-
- qemu_mutex_lock(&backup_state.backup_mutex);
-
- if (ret < 0) {
- Error *local_err = NULL;
- error_setg(&local_err, "job failed with err %d - %s", ret, strerror(-ret));
- pvebackup_propagate_error(local_err);
- }
-
- di->bs = NULL;
-
- assert(di->target == NULL);
-
+ qemu_co_mutex_lock(&backup_state.backup_mutex);
if (ret < 0) {
Error *local_err = NULL;
@@ -301,7 +300,19 @@ static void pvebackup_complete_cb(void *opaque, int ret)
assert(di->target == NULL);
- block_on_coroutine_fn(pvebackup_complete_stream, di);
+ bool error_or_canceled = pvebackup_error_or_canceled();
+
+ if (backup_state.vmaw) {
+ vma_writer_close_stream(backup_state.vmaw, di->dev_id);
+ }
+
+ if (backup_state.pbs && !error_or_canceled) {
+ Error *local_err = NULL;
+ proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
+ if (local_err != NULL) {
+ pvebackup_propagate_error(local_err);
+ }
+ }
// remove self from job list
backup_state.di_list = g_list_remove(backup_state.di_list, di);
-@@ -310,21 +310,49 @@ static void pvebackup_complete_cb(void *opaque, int ret)
+@@ -310,21 +321,49 @@ static void pvebackup_complete_cb(void *opaque, int ret)
/* call cleanup if we're the last job */
if (!g_list_first(backup_state.di_list)) {
@ -188,7 +218,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
+ Coroutine *co = qemu_coroutine_create(pvebackup_co_complete_stream, di);
+ aio_co_enter(qemu_get_aio_context(), co);
+}
+
+/*
+ * job_cancel(_sync) does not like to be called from coroutines, so defer to
+ * main loop processing via a bottom half.
@ -202,7 +232,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
+ aio_context_release(job_ctx);
+ aio_co_enter(data->ctx, data->co);
+}
+
+static void coroutine_fn pvebackup_co_cancel(void *opaque)
+{
Error *cancel_err = NULL;
@ -214,7 +244,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
if (backup_state.vmaw) {
/* make sure vma writer does not block anymore */
-@@ -342,27 +370,22 @@ static void pvebackup_cancel(void)
+@@ -342,27 +381,22 @@ static void pvebackup_cancel(void)
((PVEBackupDevInfo *)bdi->data)->job :
NULL;
@ -251,22 +281,76 @@ index f3df43eb8c..f3b8ce1f3a 100644
}
// assumes the caller holds backup_mutex
-@@ -415,6 +438,14 @@ static int coroutine_fn pvebackup_co_add_config(
+@@ -415,10 +449,18 @@ static int coroutine_fn pvebackup_co_add_config(
goto out;
}
-static bool create_backup_jobs(void) {
+/*
+ * backup_job_create can *not* be run from a coroutine (and requires an
+ * acquired AioContext), so this can't either.
+ * This does imply that this function cannot run with backup_mutex acquired.
+ * That is ok because it is only ever called between setting up the backup_state
+ * struct and starting the jobs, and from within a QMP call. This means that no
+ * other QMP call can interrupt, and no background job is running yet.
+ * The caller is responsible that backup_mutex is held nonetheless.
+ */
static bool create_backup_jobs(void) {
+static void create_backup_jobs_bh(void *opaque) {
assert(!qemu_in_coroutine());
@@ -523,11 +554,12 @@ typedef struct QmpBackupTask {
+ CoCtxData *data = (CoCtxData*)opaque;
+ Error **errp = (Error**)data->data;
+
Error *local_err = NULL;
/* create job transaction to synchronize bitmap commit and cancel all
@@ -452,24 +494,19 @@ static bool create_backup_jobs(void) {
aio_context_release(aio_context);
- if (!job || local_err != NULL) {
- Error *create_job_err = NULL;
- error_setg(&create_job_err, "backup_job_create failed: %s",
- local_err ? error_get_pretty(local_err) : "null");
+ di->job = job;
- pvebackup_propagate_error(create_job_err);
+ if (!job || local_err) {
+ error_setg(errp, "backup_job_create failed: %s",
+ local_err ? error_get_pretty(local_err) : "null");
break;
}
- di->job = job;
-
bdrv_unref(di->target);
di->target = NULL;
}
- bool errors = pvebackup_error_or_canceled();
-
- if (errors) {
+ if (*errp) {
l = backup_state.di_list;
while (l) {
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
@@ -481,12 +518,17 @@ static bool create_backup_jobs(void) {
}
if (di->job) {
+ AioContext *ctx = di->job->job.aio_context;
+ aio_context_acquire(ctx);
+ job_cancel_sync(&di->job->job);
job_unref(&di->job->job);
+ aio_context_release(ctx);
}
}
}
- return errors;
+ /* return */
+ aio_co_enter(data->ctx, data->co);
}
typedef struct QmpBackupTask {
@@ -523,11 +565,12 @@ typedef struct QmpBackupTask {
UuidInfo *result;
} QmpBackupTask;
@ -280,7 +364,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
QmpBackupTask *task = opaque;
task->result = NULL; // just to be sure
@@ -548,8 +580,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
@@ -548,8 +591,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
const char *firewall_name = "qemu-server.fw";
if (backup_state.di_list) {
@ -291,7 +375,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
return;
}
@@ -616,6 +649,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
@@ -616,6 +660,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
}
di->size = size;
total += size;
@ -300,24 +384,58 @@ index f3df43eb8c..f3b8ce1f3a 100644
}
uuid_generate(uuid);
@@ -847,6 +882,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
@@ -847,6 +893,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
backup_state.stat.dirty = total - backup_state.stat.reused;
backup_state.stat.transferred = 0;
backup_state.stat.zero_bytes = 0;
+ backup_state.stat.finishing = false;
+ backup_state.stat.starting = true;
qemu_mutex_unlock(&backup_state.stat.lock);
@@ -861,6 +897,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
@@ -861,6 +909,33 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
uuid_info->UUID = uuid_str;
task->result = uuid_info;
+
+ /* Run create_backup_jobs_bh outside of coroutine (in BH) but keep
+ * backup_mutex locked. This is fine, a CoMutex can be held across yield
+ * points, and we'll release it as soon as the BH reschedules us.
+ */
+ CoCtxData waker = {
+ .co = qemu_coroutine_self(),
+ .ctx = qemu_get_current_aio_context(),
+ .data = &local_err,
+ };
+ aio_bh_schedule_oneshot(waker.ctx, create_backup_jobs_bh, &waker);
+ qemu_coroutine_yield();
+
+ if (local_err) {
+ error_propagate(task->errp, local_err);
+ goto err;
+ }
+
+ qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ qemu_mutex_lock(&backup_state.stat.lock);
+ backup_state.stat.starting = false;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ /* start the first job in the transaction */
+ job_txn_start_seq(backup_state.txn);
+
return;
err_mutex:
@@ -903,6 +941,8 @@ err:
@@ -883,6 +958,7 @@ err:
g_free(di);
}
g_list_free(di_list);
+ backup_state.di_list = NULL;
if (devs) {
g_strfreev(devs);
@@ -903,6 +979,8 @@ err:
}
task->result = NULL;
@ -326,7 +444,7 @@ index f3df43eb8c..f3b8ce1f3a 100644
return;
}
@@ -956,22 +996,15 @@ UuidInfo *qmp_backup(
@@ -956,24 +1034,8 @@ UuidInfo *qmp_backup(
.errp = errp,
};
@ -334,23 +452,24 @@ index f3df43eb8c..f3b8ce1f3a 100644
-
block_on_coroutine_fn(pvebackup_co_prepare, &task);
if (*errp == NULL) {
bool errors = create_backup_jobs();
- if (*errp == NULL) {
- bool errors = create_backup_jobs();
- qemu_mutex_unlock(&backup_state.backup_mutex);
if (!errors) {
-
- if (!errors) {
- /* start the first job in the transaction
- * note: this might directly enter the job, so we need to do this
- * after unlocking the backup_mutex */
+ // start the first job in the transaction
job_txn_start_seq(backup_state.txn);
}
- job_txn_start_seq(backup_state.txn);
- }
- } else {
- qemu_mutex_unlock(&backup_state.backup_mutex);
}
- }
-
return task.result;
@@ -1025,6 +1058,7 @@ BackupStatus *qmp_query_backup(Error **errp)
}
@@ -1025,6 +1087,7 @@ BackupStatus *qmp_query_backup(Error **errp)
info->transferred = backup_state.stat.transferred;
info->has_reused = true;
info->reused = backup_state.stat.reused;


@ -1,43 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Fri, 10 Jul 2020 13:22:35 +0200
Subject: [PATCH] pbs: fix missing crypt and compress parameters
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
---
pve-backup.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index ba9d0d8a86..e1dcf10a45 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -958,6 +958,8 @@ UuidInfo *qmp_backup(
.backup_file = backup_file,
.has_password = has_password,
.password = password,
+ .has_keyfile = has_keyfile,
+ .keyfile = keyfile,
.has_key_password = has_key_password,
.key_password = key_password,
.has_fingerprint = has_fingerprint,
@@ -968,6 +970,10 @@ UuidInfo *qmp_backup(
.backup_time = backup_time,
.has_use_dirty_bitmap = has_use_dirty_bitmap,
.use_dirty_bitmap = use_dirty_bitmap,
+ .has_compress = has_compress,
+ .compress = compress,
+ .has_encrypt = has_encrypt,
+ .encrypt = encrypt,
.has_format = has_format,
.format = format,
.has_config_file = has_config_file,
@@ -976,8 +982,6 @@ UuidInfo *qmp_backup(
.firewall_file = firewall_file,
.has_devlist = has_devlist,
.devlist = devlist,
- .has_compress = has_compress,
- .has_encrypt = has_encrypt,
.has_speed = has_speed,
.speed = speed,
.errp = errp,


@ -1,76 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Wed, 19 Aug 2020 12:33:17 +0200
Subject: [PATCH] PVE: handle PBS write callback with big blocks correctly
Under certain conditions QEMU will push more than the given blocksize
into the callback at once. Handle it like VMA does, by iterating the
data until all is written.
The block size is stored per backup device to be used in the callback.
This avoids relying on PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE, in case it is
made configurable in the future.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pve-backup.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index e1dcf10a45..3eba85506a 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -67,6 +67,7 @@ opts_init(pvebackup_init);
typedef struct PVEBackupDevInfo {
BlockDriverState *bs;
size_t size;
+ uint64_t block_size;
uint8_t dev_id;
bool completed;
char targetfile[PATH_MAX];
@@ -147,17 +148,28 @@ pvebackup_co_dump_pbs_cb(
return -1;
}
- pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id, buf, start, size, &local_err);
- qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ uint64_t transferred = 0;
+ uint64_t reused = 0;
+ while (transferred < size) {
+ uint64_t left = size - transferred;
+ uint64_t to_transfer = left < di->block_size ? left : di->block_size;
- if (pbs_res < 0) {
- pvebackup_propagate_error(local_err);
- return pbs_res;
- } else {
- size_t reused = (pbs_res == 0) ? size : 0;
- pvebackup_add_transfered_bytes(size, !buf ? size : 0, reused);
+ pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id,
+ buf ? buf + transferred : NULL, start + transferred, to_transfer, &local_err);
+ transferred += to_transfer;
+
+ if (pbs_res < 0) {
+ pvebackup_propagate_error(local_err);
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ return pbs_res;
+ }
+
+ reused += pbs_res == 0 ? to_transfer : 0;
}
+ qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
+ pvebackup_add_transfered_bytes(size, !buf ? size : 0, reused);
+
return size;
}
@@ -726,6 +738,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
l = g_list_next(l);
+ di->block_size = dump_cb_block_size;
+
const char *devname = bdrv_get_device_name(di->bs);
BdrvDirtyBitmap *bitmap = bdrv_find_dirty_bitmap(di->bs, PBS_BITMAP_NAME);


@ -1,85 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Thu, 13 Aug 2020 13:50:27 +0200
Subject: [PATCH] PVE: add zero block handling to PBS dump callback
Both the PBS and VMA dump callbacks assume that a NULL pointer can be
passed as *pbuf, but that never happens, as backup-dump.c calls this
function with contents of an iovec.
So first, remove that assumption and add an 'assert' to verify.
Secondly, while the vma-writer already does the buffer_is_zero check
internally, for PBS we relied on that non-existent behaviour for zero
chunks, so do the buffer_is_zero check manually and pass NULL to the
rust lib in case it is true.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pve-backup.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index 3eba85506a..40c2697b37 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -8,6 +8,7 @@
#include "block/blockjob.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qmp/qerror.h"
+#include "qemu/cutils.h"
/* PVE backup state and related function */
@@ -136,10 +137,13 @@ pvebackup_co_dump_pbs_cb(
PVEBackupDevInfo *di = opaque;
assert(backup_state.pbs);
+ assert(buf);
Error *local_err = NULL;
int pbs_res = -1;
+ bool is_zero_block = size == di->block_size && buffer_is_zero(buf, size);
+
qemu_co_mutex_lock(&backup_state.dump_callback_mutex);
// avoid deadlock if job is cancelled
@@ -155,7 +159,8 @@ pvebackup_co_dump_pbs_cb(
uint64_t to_transfer = left < di->block_size ? left : di->block_size;
pbs_res = proxmox_backup_co_write_data(backup_state.pbs, di->dev_id,
- buf ? buf + transferred : NULL, start + transferred, to_transfer, &local_err);
+ is_zero_block ? NULL : buf + transferred, start + transferred,
+ to_transfer, &local_err);
transferred += to_transfer;
if (pbs_res < 0) {
@@ -168,7 +173,7 @@ pvebackup_co_dump_pbs_cb(
}
qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
- pvebackup_add_transfered_bytes(size, !buf ? size : 0, reused);
+ pvebackup_add_transfered_bytes(size, is_zero_block ? size : 0, reused);
return size;
}
@@ -190,6 +195,7 @@ pvebackup_co_dump_vma_cb(
int ret = -1;
assert(backup_state.vmaw);
+ assert(buf);
uint64_t remaining = size;
@@ -216,9 +222,7 @@ pvebackup_co_dump_vma_cb(
qemu_co_mutex_unlock(&backup_state.dump_callback_mutex);
++cluster_num;
- if (buf) {
- buf += VMA_CLUSTER_SIZE;
- }
+ buf += VMA_CLUSTER_SIZE;
if (ret < 0) {
Error *local_err = NULL;
vma_writer_error_propagate(backup_state.vmaw, &local_err);


@ -1,187 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Thu, 22 Oct 2020 17:01:07 +0200
Subject: [PATCH] PVE: fix and clean up error handling for create_backup_jobs
No more weird bool returns, just the standard "errp" format used
everywhere else too. With this, if backup_job_create fails, the error
message is actually returned over QMP and can be shown to the user.
To facilitate correct cleanup on such an error, we call
create_backup_jobs as a bottom half directly from pvebackup_co_prepare.
This additionally allows us to actually hold the backup_mutex during
operation.
Also add a job_cancel_sync before job_unref, since a job must be in
STATUS_NULL to be deleted by unref, which could trigger an assert
before.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
pve-backup.c | 79 +++++++++++++++++++++++++++++++++++-----------------
1 file changed, 54 insertions(+), 25 deletions(-)
diff --git a/pve-backup.c b/pve-backup.c
index f3b8ce1f3a..ded90cb822 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -50,6 +50,7 @@ static struct PVEBackupState {
size_t zero_bytes;
GList *bitmap_list;
bool finishing;
+ bool starting;
} stat;
int64_t speed;
VmaWriter *vmaw;
@@ -277,6 +278,16 @@ static void coroutine_fn pvebackup_co_complete_stream(void *opaque)
PVEBackupDevInfo *di = opaque;
int ret = di->completed_ret;
+ qemu_mutex_lock(&backup_state.stat.lock);
+ bool starting = backup_state.stat.starting;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+ if (starting) {
+ /* in 'starting' state, no tasks have been run yet, meaning we can (and
+ * must) skip all cleanup, as we don't know what has and hasn't been
+ * initialized yet. */
+ return;
+ }
+
qemu_co_mutex_lock(&backup_state.backup_mutex);
if (ret < 0) {
@@ -441,15 +452,15 @@ static int coroutine_fn pvebackup_co_add_config(
/*
* backup_job_create can *not* be run from a coroutine (and requires an
* acquired AioContext), so this can't either.
- * This does imply that this function cannot run with backup_mutex acquired.
- * That is ok because it is only ever called between setting up the backup_state
- * struct and starting the jobs, and from within a QMP call. This means that no
- * other QMP call can interrupt, and no background job is running yet.
+ * The caller is responsible that backup_mutex is held nonetheless.
*/
-static bool create_backup_jobs(void) {
+static void create_backup_jobs_bh(void *opaque) {
assert(!qemu_in_coroutine());
+ CoCtxData *data = (CoCtxData*)opaque;
+ Error **errp = (Error**)data->data;
+
Error *local_err = NULL;
/* create job transaction to synchronize bitmap commit and cancel all
@@ -483,24 +494,19 @@ static bool create_backup_jobs(void) {
aio_context_release(aio_context);
- if (!job || local_err != NULL) {
- Error *create_job_err = NULL;
- error_setg(&create_job_err, "backup_job_create failed: %s",
- local_err ? error_get_pretty(local_err) : "null");
+ di->job = job;
- pvebackup_propagate_error(create_job_err);
+ if (!job || local_err) {
+ error_setg(errp, "backup_job_create failed: %s",
+ local_err ? error_get_pretty(local_err) : "null");
break;
}
- di->job = job;
-
bdrv_unref(di->target);
di->target = NULL;
}
- bool errors = pvebackup_error_or_canceled();
-
- if (errors) {
+ if (*errp) {
l = backup_state.di_list;
while (l) {
PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
@@ -512,12 +518,17 @@ static bool create_backup_jobs(void) {
}
if (di->job) {
+ AioContext *ctx = di->job->job.aio_context;
+ aio_context_acquire(ctx);
+ job_cancel_sync(&di->job->job);
job_unref(&di->job->job);
+ aio_context_release(ctx);
}
}
}
- return errors;
+ /* return */
+ aio_co_enter(data->ctx, data->co);
}
typedef struct QmpBackupTask {
@@ -883,6 +894,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
backup_state.stat.transferred = 0;
backup_state.stat.zero_bytes = 0;
backup_state.stat.finishing = false;
+ backup_state.stat.starting = true;
qemu_mutex_unlock(&backup_state.stat.lock);
@@ -898,7 +910,32 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
task->result = uuid_info;
+ /* Run create_backup_jobs_bh outside of coroutine (in BH) but keep
+ * backup_mutex locked. This is fine, a CoMutex can be held across yield
+ * points, and we'll release it as soon as the BH reschedules us.
+ */
+ CoCtxData waker = {
+ .co = qemu_coroutine_self(),
+ .ctx = qemu_get_current_aio_context(),
+ .data = &local_err,
+ };
+ aio_bh_schedule_oneshot(waker.ctx, create_backup_jobs_bh, &waker);
+ qemu_coroutine_yield();
+
+ if (local_err) {
+ error_propagate(task->errp, local_err);
+ goto err;
+ }
+
qemu_co_mutex_unlock(&backup_state.backup_mutex);
+
+ qemu_mutex_lock(&backup_state.stat.lock);
+ backup_state.stat.starting = false;
+ qemu_mutex_unlock(&backup_state.stat.lock);
+
+ /* start the first job in the transaction */
+ job_txn_start_seq(backup_state.txn);
+
return;
err_mutex:
@@ -921,6 +958,7 @@ err:
g_free(di);
}
g_list_free(di_list);
+ backup_state.di_list = NULL;
if (devs) {
g_strfreev(devs);
@@ -998,15 +1036,6 @@ UuidInfo *qmp_backup(
block_on_coroutine_fn(pvebackup_co_prepare, &task);
- if (*errp == NULL) {
- bool errors = create_backup_jobs();
-
- if (!errors) {
- // start the first job in the transaction
- job_txn_start_seq(backup_state.txn);
- }
- }
-
return task.result;
}


@ -1,39 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 4 Jan 2021 14:49:14 +0100
Subject: [PATCH] PVE: fix aborting multiple 'CREATED' jobs in sequential
transaction
Deadlocks could occur in the AIO_WAIT_WHILE loop in job_finish_sync,
which would wait for CREATED but not running jobs to complete, even
though job_enter is a no-op in that scenario. Mark offending jobs as
ABORTING immediately via job_update_rc if required.
Manifested itself in cancelling or failing backups with more than 2
drives.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Tested-by: Mira Limbeck <m.limbeck@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
---
job.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/job.c b/job.c
index 8f06e05fbf..05b7797e82 100644
--- a/job.c
+++ b/job.c
@@ -1035,6 +1035,13 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
return -EBUSY;
}
+ /* in a sequential transaction jobs with status CREATED can appear at time
+ * of cancelling, these have not begun work so job_enter won't do anything,
+ * let's ensure they are marked as ABORTING if required */
+ if (job->status == JOB_STATUS_CREATED && job->txn->sequential) {
+ job_update_rc(job);
+ }
+
AIO_WAIT_WHILE(job->aio_context,
(job_enter(job), !job_is_completed(job)));

debian/patches/series

@ -35,33 +35,23 @@ pve/0026-PVE-Backup-add-vma-backup-format-code.patch
pve/0027-PVE-Backup-add-backup-dump-block-driver.patch
pve/0028-PVE-Backup-proxmox-backup-patches-for-qemu.patch
pve/0029-PVE-Backup-pbs-restore-new-command-to-restore-from-p.patch
-pve/0030-PVE-Backup-avoid-coroutines-to-fix-AIO-freeze-cleanu.patch
-pve/0031-drive-mirror-add-support-for-sync-bitmap-mode-never.patch
-pve/0032-drive-mirror-add-support-for-conditional-and-always-.patch
-pve/0033-mirror-add-check-for-bitmap-mode-without-bitmap.patch
-pve/0034-mirror-switch-to-bdrv_dirty_bitmap_merge_internal.patch
-pve/0035-iotests-add-test-for-bitmap-mirror.patch
-pve/0036-mirror-move-some-checks-to-qmp.patch
-pve/0037-PVE-Backup-Add-dirty-bitmap-tracking-for-incremental.patch
-pve/0038-PVE-backup-rename-incremental-to-use-dirty-bitmap.patch
-pve/0039-PVE-fixup-pbs-restore-API.patch
-pve/0040-PVE-always-set-dirty-counter-for-non-incremental-bac.patch
-pve/0041-PVE-use-proxmox_backup_check_incremental.patch
-pve/0042-PVE-fixup-pbs-backup-add-compress-and-encrypt-option.patch
-pve/0043-PVE-Add-PBS-block-driver-to-map-backup-archives-into.patch
-pve/0044-PVE-add-query_proxmox_support-QMP-command.patch
-pve/0045-pbs-fix-missing-crypt-and-compress-parameters.patch
-pve/0046-PVE-handle-PBS-write-callback-with-big-blocks-correc.patch
-pve/0047-PVE-add-zero-block-handling-to-PBS-dump-callback.patch
-pve/0048-PVE-add-query-pbs-bitmap-info-QMP-call.patch
-pve/0049-PVE-redirect-stderr-to-journal-when-daemonized.patch
-pve/0050-PVE-Add-sequential-job-transaction-support.patch
-pve/0051-PVE-Backup-Use-a-transaction-to-synchronize-job-stat.patch
-pve/0052-PVE-Backup-Use-more-coroutines-and-don-t-block-on-fi.patch
-pve/0053-PVE-fix-and-clean-up-error-handling-for-create_backu.patch
-pve/0054-PVE-Migrate-dirty-bitmap-state-via-savevm.patch
-pve/0055-migration-block-dirty-bitmap-migrate-other-bitmaps-e.patch
-pve/0056-PVE-fix-aborting-multiple-CREATED-jobs-in-sequential.patch
-pve/0057-PVE-fall-back-to-open-iscsi-initiatorname.patch
-pve/0058-PVE-Use-coroutine-QMP-for-backup-cancel_backup.patch
-pve/0059-PBS-add-master-key-support.patch
+pve/0030-drive-mirror-add-support-for-sync-bitmap-mode-never.patch
+pve/0031-drive-mirror-add-support-for-conditional-and-always-.patch
+pve/0032-mirror-add-check-for-bitmap-mode-without-bitmap.patch
+pve/0033-mirror-switch-to-bdrv_dirty_bitmap_merge_internal.patch
+pve/0034-iotests-add-test-for-bitmap-mirror.patch
+pve/0035-mirror-move-some-checks-to-qmp.patch
+pve/0036-PVE-Backup-Add-dirty-bitmap-tracking-for-incremental.patch
+pve/0037-PVE-various-PBS-fixes.patch
+pve/0038-PVE-Add-PBS-block-driver-to-map-backup-archives-into.patch
+pve/0039-PVE-add-query_proxmox_support-QMP-command.patch
+pve/0040-PVE-add-query-pbs-bitmap-info-QMP-call.patch
+pve/0041-PVE-redirect-stderr-to-journal-when-daemonized.patch
+pve/0042-PVE-Add-sequential-job-transaction-support.patch
+pve/0043-PVE-Backup-Use-a-transaction-to-synchronize-job-stat.patch
+pve/0044-PVE-Backup-Don-t-block-on-finishing-and-cleanup-crea.patch
+pve/0045-PVE-Migrate-dirty-bitmap-state-via-savevm.patch
+pve/0046-migration-block-dirty-bitmap-migrate-other-bitmaps-e.patch
+pve/0047-PVE-fall-back-to-open-iscsi-initiatorname.patch
+pve/0048-PVE-Use-coroutine-QMP-for-backup-cancel_backup.patch
+pve/0049-PBS-add-master-key-support.patch