633c5ed17f
This patch was dropped because it causes a kernel oops, and upstream is unresponsive about it. See https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1726519
99 lines | 3.3 KiB | Diff
From 711a55c9d58955a2bfca89cd25935ca607e49bc0 Mon Sep 17 00:00:00 2001
From: Jim Mattson <jmattson@google.com>
Date: Wed, 3 Jan 2018 14:31:38 -0800
Subject: [PATCH 232/242] kvm: vmx: Scrub hardware GPRs at VM-exit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Guest GPR values are live in the hardware GPRs at VM-exit. Do not
leave any guest values in hardware GPRs after the guest GPR values are
saved to the vcpu_vmx structure.

This is a partial mitigation for CVE 2017-5715 and CVE 2017-5753.
Specifically, it defeats the Project Zero PoC for CVE 2017-5715.

Suggested-by: Eric Northup <digitaleric@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Eric Northup <digitaleric@google.com>
Reviewed-by: Benjamin Serebrin <serebrin@google.com>
Reviewed-by: Andrew Honig <ahonig@google.com>
[Paolo: Add AMD bits, Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/kvm/svm.c | 19 +++++++++++++++++++
 arch/x86/kvm/vmx.c | 14 +++++++++++++-
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index af09baa3d736..92cd94d51e1f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4924,6 +4924,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%[svm]) \n\t"
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
+#endif
+		/*
+		 * Clear host registers marked as clobbered to prevent
+		 * speculative use.
+		 */
+		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+		"xor %%r8, %%r8 \n\t"
+		"xor %%r9, %%r9 \n\t"
+		"xor %%r10, %%r10 \n\t"
+		"xor %%r11, %%r11 \n\t"
+		"xor %%r12, %%r12 \n\t"
+		"xor %%r13, %%r13 \n\t"
+		"xor %%r14, %%r14 \n\t"
+		"xor %%r15, %%r15 \n\t"
 #endif
 		"pop %%" _ASM_BP
 		:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d61986a36575..9b4256fd589a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9140,6 +9140,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9156,12 +9157,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%0) \n\t"
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
+		"xor %%r8d, %%r8d \n\t"
+		"xor %%r9d, %%r9d \n\t"
+		"xor %%r10d, %%r10d \n\t"
+		"xor %%r11d, %%r11d \n\t"
+		"xor %%r12d, %%r12d \n\t"
+		"xor %%r13d, %%r13d \n\t"
+		"xor %%r14d, %%r14d \n\t"
+		"xor %%r15d, %%r15d \n\t"
 #endif
 		"mov %%cr2, %%" _ASM_AX " \n\t"
 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
+		"xor %%eax, %%eax \n\t"
+		"xor %%ebx, %%ebx \n\t"
+		"xor %%esi, %%esi \n\t"
+		"xor %%edi, %%edi \n\t"
 		"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
-		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		".global vmx_return \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
-- 
2.14.2