32c96a9e46
(generated with debian/scripts/import-upstream-tag) Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
182 lines, 5.3 KiB (diff)
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Christoffer Dall <christoffer.dall@linaro.org>
Date: Mon, 4 Dec 2017 21:35:23 +0100
Subject: [PATCH] KVM: Take vcpu->mutex outside vcpu_load

As we're about to call vcpu_load() from architecture-specific
implementations of the KVM vcpu ioctls, but yet we access data
structures protected by the vcpu->mutex in the generic code, factor
this logic out from vcpu_load().

x86 is the only architecture which calls vcpu_load() outside of the main
vcpu ioctl function, and these calls will no longer take the vcpu mutex
following this patch. However, with the exception of
kvm_arch_vcpu_postcreate (see below), the callers are either in the
creation or destruction path of the VCPU, which means there cannot be
any concurrent access to the data structure, because the file descriptor
is not yet accessible, or is already gone.

kvm_arch_vcpu_postcreate makes the newly created vcpu potentially
accessible by other in-kernel threads through the kvm->vcpus array, and
we therefore take the vcpu mutex in this case directly.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit ec7660ccdd2b71d8c7f0243f8590253713e9b75d)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/vmx.c       |  4 +---
 arch/x86/kvm/x86.c       | 20 +++++++-------------
 include/linux/kvm_host.h |  2 +-
 virt/kvm/kvm_main.c      | 17 ++++++-----------
 4 files changed, 15 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fbaae82d3025..521f8d0632fb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10015,10 +10015,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int r;
 
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 	free_nested(vmx);
 	vcpu_put(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 012bfad459ed..e42319badda3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8037,17 +8037,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	int r;
-
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	kvm_vcpu_mtrr_init(vcpu);
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	vcpu_load(vcpu);
 	kvm_vcpu_reset(vcpu, false);
 	kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
-	return r;
+	return 0;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -8057,13 +8053,15 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 	kvm_hv_vcpu_postcreate(vcpu);
 
-	if (vcpu_load(vcpu))
+	if (mutex_lock_killable(&vcpu->mutex))
 		return;
+	vcpu_load(vcpu);
 	msr.data = 0x0;
 	msr.index = MSR_IA32_TSC;
 	msr.host_initiated = true;
 	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);
+	mutex_unlock(&vcpu->mutex);
 
 	if (!kvmclock_periodic_sync)
 		return;
@@ -8074,11 +8072,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	int r;
 	vcpu->arch.apf.msr_val = 0;
 
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 
@@ -8458,9 +8454,7 @@ int kvm_arch_post_init_vm(struct kvm *kvm)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	int r;
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 927d0002d9c9..f2c024661513 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -534,7 +534,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_HAVE_IOAPIC
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a715074f424e..010c6366a1d5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -152,17 +152,12 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-int vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
-	int cpu;
-
-	if (mutex_lock_killable(&vcpu->mutex))
-		return -EINTR;
-	cpu = get_cpu();
+	int cpu = get_cpu();
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
-	return 0;
 }
 EXPORT_SYMBOL_GPL(vcpu_load);
 
@@ -172,7 +167,6 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_put(vcpu);
 	preempt_notifier_unregister(&vcpu->preempt_notifier);
 	preempt_enable();
-	mutex_unlock(&vcpu->mutex);
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
@@ -2619,9 +2613,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 #endif
 
 
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
+	vcpu_load(vcpu);
 	switch (ioctl) {
 	case KVM_RUN: {
 		struct pid *oldpid;
@@ -2794,6 +2788,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	}
 out:
 	vcpu_put(vcpu);
+	mutex_unlock(&vcpu->mutex);
 	kfree(fpu);
 	kfree(kvm_sregs);
 	return r;