From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 7 Aug 2017 20:59:21 -0700
Subject: [PATCH] x86/xen/64: Rearrange the SYSCALL entries
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Xen's raw SYSCALL entries are much less weird than native. Rather
than fudging them to look like native entries, use the Xen-provided
stack frame directly.

This lets us eliminate entry_SYSCALL_64_after_swapgs and two uses of
the SWAPGS_UNSAFE_STACK paravirt hook. The SYSENTER code would
benefit from similar treatment.

This makes one change to the native code path: the compat
instruction that clears the high 32 bits of %rax is moved slightly
later. I'd be surprised if this affects performance at all.

Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/7c88ed36805d36841ab03ec3b48b4122c4418d71.1502164668.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 8a9949bc71a71b3dd633255ebe8f8869b1f73474)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit b8cec41ee5f30df5032cfe8c86103f7d92a89590)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/entry/entry_64.S        |  9 ++-------
 arch/x86/entry/entry_64_compat.S |  7 +++----
 arch/x86/xen/xen-asm_64.S        | 23 +++++++++--------------
 3 files changed, 14 insertions(+), 25 deletions(-)
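
Not part of the commit itself: a brief sketch of why two pops are enough
for the two SYSCALL paths below. On a Xen PV syscall callback the guest
kernel is entered with an iret-like frame (plus rcx and r11) already on
the stack, per the frame-layout comment in xen-asm_64.S (partly visible
in the hunk context) and the 5*8(%rsp) offset used for the SYSENTER case:

        rsp -> rcx       (user rip, as SYSCALL saved it)
               r11       (user rflags, as SYSCALL saved it)
               rip
               cs
               rflags
               rsp
               ss

Popping %rcx and %r11 restores the two SYSCALL-clobbered registers to the
values a native entry would have and leaves exactly the five-word
ss/rsp/rflags/cs/rip frame that the native path has pushed by the time it
reaches the new entry_SYSCALL_64_after_hwframe and
entry_SYSCALL_compat_after_hwframe labels, so the Xen entries can jump
straight there instead of faking a native SYSCALL entry. The SYSENTER
path keeps the mov-based fixup because this patch adds no equivalent
label to entry_SYSENTER_compat.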

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 64b233ab7cad..4dbb336a1fdd 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -142,14 +142,8 @@ ENTRY(entry_SYSCALL_64)
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
 	 * it is too small to ever cause noticeable irq latency.
 	 */
-	SWAPGS_UNSAFE_STACK
-	/*
-	 * A hypervisor implementation might want to use a label
-	 * after the swapgs, so that it can do the swapgs
-	 * for the guest and jump here on syscall.
-	 */
-GLOBAL(entry_SYSCALL_64_after_swapgs)
 
+	swapgs
 	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
@@ -161,6 +155,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
 	pushq	%r11				/* pt_regs->flags */
 	pushq	$__USER_CS			/* pt_regs->cs */
 	pushq	%rcx				/* pt_regs->ip */
+GLOBAL(entry_SYSCALL_64_after_hwframe)
 	pushq	%rax				/* pt_regs->orig_ax */
 	pushq	%rdi				/* pt_regs->di */
 	pushq	%rsi				/* pt_regs->si */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721dafbcb1..5314d7b8e5ad 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -183,21 +183,20 @@ ENDPROC(entry_SYSENTER_compat)
  */
 ENTRY(entry_SYSCALL_compat)
 	/* Interrupts are off on entry. */
-	SWAPGS_UNSAFE_STACK
+	swapgs
 
 	/* Stash user ESP and switch to the kernel stack. */
 	movl	%esp, %r8d
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-	/* Zero-extending 32-bit regs, do not remove */
-	movl	%eax, %eax
-
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER32_DS		/* pt_regs->ss */
 	pushq	%r8			/* pt_regs->sp */
 	pushq	%r11			/* pt_regs->flags */
 	pushq	$__USER32_CS		/* pt_regs->cs */
 	pushq	%rcx			/* pt_regs->ip */
+GLOBAL(entry_SYSCALL_compat_after_hwframe)
+	movl	%eax, %eax		/* discard orig_ax high bits */
 	pushq	%rax			/* pt_regs->orig_ax */
 	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index c3df43141e70..a8a4f4c460a6 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -82,34 +82,29 @@ RELOC(xen_sysret64, 1b+1)
 *	rip
 *	r11
 * rsp->rcx
- *
- * In all the entrypoints, we undo all that to make it look like a
- * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
 
-.macro undo_xen_syscall
-	mov 0*8(%rsp), %rcx
-	mov 1*8(%rsp), %r11
-	mov 5*8(%rsp), %rsp
-.endm
-
 /* Normal 64-bit system call target */
 ENTRY(xen_syscall_target)
-	undo_xen_syscall
-	jmp entry_SYSCALL_64_after_swapgs
+	popq %rcx
+	popq %r11
+	jmp entry_SYSCALL_64_after_hwframe
 ENDPROC(xen_syscall_target)
 
 #ifdef CONFIG_IA32_EMULATION
 
 /* 32-bit compat syscall target */
 ENTRY(xen_syscall32_target)
-	undo_xen_syscall
-	jmp entry_SYSCALL_compat
+	popq %rcx
+	popq %r11
+	jmp entry_SYSCALL_compat_after_hwframe
 ENDPROC(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 ENTRY(xen_sysenter_target)
-	undo_xen_syscall
+	mov 0*8(%rsp), %rcx
+	mov 1*8(%rsp), %r11
+	mov 5*8(%rsp), %rsp
 	jmp entry_SYSENTER_compat
 ENDPROC(xen_sysenter_target)
 
-- 
2.14.2