From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Thu, 4 Aug 2022 15:28:32 +0200
Subject: [PATCH] KVM: x86: revalidate steal time cache if MSR value changes

commit 901d3765fa804ce42812f1d5b1f3de2dfbb26723 upstream.

Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
/ preempted status", 2021-11-11) open coded the previous call to
kvm_map_gfn, but in doing so it dropped the comparison between the cached
guest physical address and the one in the MSR. This causes an incorrect
cache hit if the guest modifies the steal time address while the memslots
remain the same. This can happen with kexec, in which case the steal
time data is written at the address used by the old kernel instead of
the one used by the new kernel.

While at it, rename the variable from gfn to gpa since it is a plain
physical address and not a right-shifted one.

Reported-by: Dave Young <ruyang@redhat.com>
Reported-by: Xiaoying Yan <yiyan@redhat.com>
Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: stable@vger.kernel.org
Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/x86.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 461c9d815d6c..b46677baf396 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3236,6 +3236,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
 	struct kvm_steal_time __user *st;
 	struct kvm_memslots *slots;
+	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
 	u64 steal;
 	u32 version;
 
@@ -3253,13 +3254,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	slots = kvm_memslots(vcpu->kvm);
 
 	if (unlikely(slots->generation != ghc->generation ||
+		     gpa != ghc->gpa ||
 		     kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
-		gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
-
 		/* We rely on the fact that it fits in a single page. */
 		BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
 		    kvm_is_error_hva(ghc->hva) || !ghc->memslot)
 			return;
 	}