From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen@linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:34 +0100
Subject: [PATCH] x86/mm/pti: Disable global pages if PAGE_TABLE_ISOLATION=y
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Global pages stay in the TLB across context switches. Since all contexts
share the same kernel mapping, these mappings are marked as global pages
so kernel entries in the TLB are not flushed out on a context switch.

But even having these entries in the TLB opens up something that an
attacker can use, such as the double-page-fault attack:

  http://www.ieee-security.org/TC/SP2013/papers/4977a191.pdf

That means that even when PAGE_TABLE_ISOLATION switches page tables on
return to user space, the global pages would stay in the TLB cache.

Disable global pages so that kernel TLB entries can be flushed before
returning to user space. This way, all accesses to kernel addresses from
userspace result in a TLB miss independent of the existence of a kernel
mapping.

Suppress global pages via the __supported_pte_mask. The user space
mappings set PAGE_GLOBAL for the minimal kernel mappings which are
required for entry/exit. These mappings are set up manually so the
filtering does not take place.
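
For review purposes, the resulting policy can be sketched as a small,
standalone C model (illustrative only, not kernel code): cpu_has_pge and
cpu_has_pti below stand in for the boot_cpu_has(X86_FEATURE_PGE) and
static_cpu_has(X86_FEATURE_PTI) checks, and the _PAGE_GLOBAL value is
chosen just for the example.

  #include <stdbool.h>
  #include <stdio.h>

  #define _PAGE_GLOBAL (1UL << 8)  /* illustrative bit position */

  static unsigned long supported_pte_mask = ~0UL;

  /*
   * Model of probe_page_size_mask()'s global-page handling after this
   * patch: global pages start out disabled and are re-enabled only when
   * PGE is present and PTI is not in use.
   */
  static void probe_global_pages(bool cpu_has_pge, bool cpu_has_pti)
  {
      supported_pte_mask &= ~_PAGE_GLOBAL;
      if (cpu_has_pge) {
          /* The real code also sets CR4.PGE at this point. */
          if (!cpu_has_pti)
              supported_pte_mask |= _PAGE_GLOBAL;
      }
  }

  int main(void)
  {
      probe_global_pages(true, true);
      printf("PGE + PTI  : global pages %s\n",
             (supported_pte_mask & _PAGE_GLOBAL) ? "on" : "off");

      probe_global_pages(true, false);
      printf("PGE, no PTI: global pages %s\n",
             (supported_pte_mask & _PAGE_GLOBAL) ? "on" : "off");
      return 0;
  }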

[ The __supported_pte_mask simplification was written by Thomas Gleixner. ]

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit c313ec66317d421fb5768d78c56abed2dc862264)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit ace78e99d765da1e59f6b151adac6c360c67af7d)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/mm/init.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a22c2b95e513..020223420308 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,6 +161,12 @@ struct map_range {
 
 static int page_size_mask;
 
+static void enable_global_pages(void)
+{
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		__supported_pte_mask |= _PAGE_GLOBAL;
+}
+
 static void __init probe_page_size_mask(void)
 {
 	/*
@@ -179,11 +185,11 @@ static void __init probe_page_size_mask(void)
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
 	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
-		__supported_pte_mask |= _PAGE_GLOBAL;
-	} else
-		__supported_pte_mask &= ~_PAGE_GLOBAL;
+		enable_global_pages();
+	}
 
 	/* Enable 1 GB linear kernel mappings if available: */
 	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
-- 
2.14.2