4fc427d906
(generated with debian/scripts/import-upstream-tag) Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
48 lines
1.9 KiB
Diff
48 lines
1.9 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: Paolo Bonzini <pbonzini@redhat.com>
|
|
Date: Thu, 4 Aug 2022 15:28:32 +0200
|
|
Subject: [PATCH] KVM: x86: do not report preemption if the steal time cache is
|
|
stale
|
|
|
|
commit c3c28d24d910a746b02f496d190e0e8c6560224b upstream.
|
|
|
|
Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
|
|
/ preempted status", 2021-11-11) open coded the previous call to
|
|
kvm_map_gfn, but in doing so it dropped the comparison between the cached
|
|
guest physical address and the one in the MSR. This causes an incorrect
|
|
cache hit if the guest modifies the steal time address while the memslots
|
|
remain the same. This can happen with kexec, in which case the preempted
|
|
bit is written at the address used by the old kernel instead of
|
|
the new one.
|
|
|
|
Cc: David Woodhouse <dwmw@amazon.co.uk>
|
|
Cc: stable@vger.kernel.org
|
|
Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
|
|
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
|
|
---
|
|
arch/x86/kvm/x86.c | 2 ++
|
|
1 file changed, 2 insertions(+)
|
|
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index 12b6dde48d03..d915dc8a964a 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -4629,6 +4629,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
|
|
struct kvm_steal_time __user *st;
|
|
struct kvm_memslots *slots;
|
|
static const u8 preempted = KVM_VCPU_PREEMPTED;
|
|
+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
|
|
|
|
/*
|
|
* The vCPU can be marked preempted if and only if the VM-Exit was on
|
|
@@ -4656,6 +4657,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
|
|
slots = kvm_memslots(vcpu->kvm);
|
|
|
|
if (unlikely(slots->generation != ghc->generation ||
|
|
+ gpa != ghc->gpa ||
|
|
kvm_is_error_hva(ghc->hva) || !ghc->memslot))
|
|
return;
|
|
|