Add tentative fix for QMP hang
Not exactly as sent upstream[0], since we're missing a change in our v5.2.0 branch (irrelevant for us), but it works the same way functionally.

[0] https://lists.gnu.org/archive/html/qemu-devel/2021-03/msg07590.html
This commit is contained in:
parent 154a7c0f8d
commit bb751cab32

debian/patches/extra/0010-monitor-qmp-fix-race-on-CHR_EVENT_CLOSED-without-OOB.patch (vendored, normal file, 48 lines added)
@@ -0,0 +1,48 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 22 Mar 2021 15:20:04 +0100
Subject: [PATCH] monitor/qmp: fix race on CHR_EVENT_CLOSED without OOB

The QMP dispatcher coroutine holds the qmp_queue_lock over a yield
point, where it expects to be rescheduled from the main context. If a
CHR_EVENT_CLOSED event is received just then, it can race and block the
main thread on the mutex in monitor_qmp_cleanup_queue_and_resume.

Calculate need_resume immediately after we pop a request from the queue,
so that we can release the mutex before yielding.

Suggested-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
 monitor/qmp.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/monitor/qmp.c b/monitor/qmp.c
index 2e37d11bd3..2aff833f7a 100644
--- a/monitor/qmp.c
+++ b/monitor/qmp.c
@@ -252,6 +252,12 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
             }
         }

+        mon = req_obj->mon;
+        /* qmp_oob_enabled() might change after "qmp_capabilities" */
+        need_resume = !qmp_oob_enabled(mon) ||
+            mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
+        qemu_mutex_unlock(&mon->qmp_queue_lock);
+
         if (qatomic_xchg(&qmp_dispatcher_co_busy, true) == true) {
             /*
              * Someone rescheduled us (probably because a new requests
@@ -270,11 +276,6 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
         aio_co_schedule(qemu_get_aio_context(), qmp_dispatcher_co);
         qemu_coroutine_yield();

-        mon = req_obj->mon;
-        /* qmp_oob_enabled() might change after "qmp_capabilities" */
-        need_resume = !qmp_oob_enabled(mon) ||
-            mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
-        qemu_mutex_unlock(&mon->qmp_queue_lock);
         if (req_obj->req) {
             QDict *qdict = qobject_to(QDict, req_obj->req);
             QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
debian/patches/series (vendored, 1 line added)

@@ -7,6 +7,7 @@ extra/0006-virtiofsd-prevent-opening-of-special-files-CVE-2020-.patch
 extra/0007-virtiofsd-Add-_llseek-to-the-seccomp-whitelist.patch
 extra/0008-virtiofsd-Add-restart_syscall-to-the-seccomp-whiteli.patch
 extra/0009-i386-acpi-restore-device-paths-for-pre-5.1-vms.patch
+extra/0010-monitor-qmp-fix-race-on-CHR_EVENT_CLOSED-without-OOB.patch
 bitmap-mirror/0001-drive-mirror-add-support-for-sync-bitmap-mode-never.patch
 bitmap-mirror/0002-drive-mirror-add-support-for-conditional-and-always-.patch
 bitmap-mirror/0003-mirror-add-check-for-bitmap-mode-without-bitmap.patch
Loading…
Reference in New Issue
Block a user