From a97c6afa806d4fe6475a2d9215ff57367ee34b72 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 2 Nov 2017 00:59:16 -0700
Subject: [PATCH 104/241] x86/entry/64: Remove thread_struct::sp0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

On x86_64, we can easily calculate sp0 when needed instead of
storing it in thread_struct.

On x86_32, a similar cleanup would be possible, but it would require
cleaning up the vm86 code first, and that can wait for a later
cleanup series.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/719cd9c66c548c4350d98a90f050aee8b17f8919.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit d375cf1530595e33961a8844192cddab913650e3)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 4910af19c69a87e9432467f4d7cb78da5fbcc30a)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
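[ Illustrative note, not part of the commit: the standalone C sketch
  below mimics the pointer arithmetic this patch introduces for x86_64,
  where sp0 is derived from the task's stack page instead of being
  cached in thread_struct. The THREAD_SIZE value and the zero
  top-of-stack padding are assumptions chosen for the example, not
  values read from any particular kernel config. ]

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE			(16UL * 1024)	/* assumed: 16 KiB stack */
#define TOP_OF_KERNEL_STACK_PADDING	0UL		/* x86_64: no padding */

struct pt_regs { unsigned long r[21]; };		/* stand-in register frame */

/* Mirrors the new task_pt_regs(): pt_regs sits at the top of the stack. */
static struct pt_regs *task_pt_regs(uintptr_t stack_page)
{
	uintptr_t ptr = stack_page + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;

	return (struct pt_regs *)ptr - 1;
}

/* Mirrors task_top_of_stack(): the value update_sp0() now loads on 64-bit. */
static uintptr_t task_top_of_stack(uintptr_t stack_page)
{
	return (uintptr_t)(task_pt_regs(stack_page) + 1);
}

int main(void)
{
	uintptr_t stack_page = 0x7f0000000000UL;	/* made-up stack base */

	printf("pt_regs at %#jx, sp0 = %#jx\n",
	       (uintmax_t)(uintptr_t)task_pt_regs(stack_page),
	       (uintmax_t)task_top_of_stack(stack_page));
	return 0;
}

[ With zero padding the derived sp0 is stack_page + THREAD_SIZE, exactly
  the value the copy_thread_tls() assignment deleted below used to store,
  which is why the cached field is redundant on 64-bit. ]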
 arch/x86/include/asm/compat.h    |  1 +
 arch/x86/include/asm/processor.h | 28 +++++++++-------------------
 arch/x86/include/asm/switch_to.h |  6 ++++++
 arch/x86/kernel/process_64.c     |  1 -
 4 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 5343c19814b3..948b6d8ec46f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,6 +6,7 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/user32.h>
 #include <asm/unistd.h>
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f83fbf1b6dd9..cec9a329c0f1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -423,7 +423,9 @@ typedef struct {
 struct thread_struct {
 	/* Cached TLS descriptors: */
 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
+#ifdef CONFIG_X86_32
 	unsigned long		sp0;
+#endif
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
@@ -790,6 +792,13 @@ static inline void spin_lock_prefetch(const void *x)
 
 #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
 
+#define task_pt_regs(task) \
+({									\
+	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
+	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
+	((struct pt_regs *)__ptr) - 1;					\
+})
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
@@ -807,23 +816,6 @@ static inline void spin_lock_prefetch(const void *x)
 	.addr_limit		= KERNEL_DS,				  \
 }
 
-/*
- * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessible even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the ss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
-#define task_pt_regs(task) \
-({									\
-	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
-	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
-	((struct pt_regs *)__ptr) - 1;					\
-})
-
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
@@ -853,11 +845,9 @@ static inline void spin_lock_prefetch(const void *x)
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  {						\
-	.sp0			= TOP_OF_INIT_STACK,		\
 	.addr_limit		= KERNEL_DS,			\
 }
 
-#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 54e64d909725..010cd6e4eafc 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SWITCH_TO_H
 #define _ASM_X86_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
+
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 
 struct task_struct *__switch_to_asm(struct task_struct *prev,
@@ -87,7 +89,11 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+#ifdef CONFIG_X86_32
 	load_sp0(task->thread.sp0);
+#else
+	load_sp0(task_top_of_stack(task));
+#endif
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 8a748e17bf6e..b08b9b6c40eb 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -275,7 +275,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct inactive_task_frame *frame;
 	struct task_struct *me = current;
 
-	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
-- 
2.14.2