From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen@linux.intel.com>
Date: Mon, 4 Dec 2017 15:07:57 +0100
Subject: [PATCH] x86/mm: Allow flushing for future ASID switches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

If changing the page tables in such a way that an invalidation of all
contexts (aka. PCIDs / ASIDs) is required, they can be actively invalidated
by:

 1. INVPCID for each PCID (works for single pages too).

 2. Load CR3 with each PCID without the NOFLUSH bit set.

 3. Load CR3 with the NOFLUSH bit set for each and do INVLPG for each address.

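As a sketch, approach 1 would boil down to a loop like the one below. It
assumes the invpcid_flush_single_context() helper from <asm/tlbflush.h>;
the loop bound nr_pcids_in_use is hypothetical:

	u16 pcid;

	/* Hypothetical: one INVPCID (single-context) per PCID in use. */
	for (pcid = 0; pcid < nr_pcids_in_use; pcid++)
		invpcid_flush_single_context(pcid);
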
But, none of these are really feasible since there are ~6 ASIDs (12 with
PAGE_TABLE_ISOLATION) at the time that invalidation is required. Instead
of actively invalidating them, invalidate the *current* context and also
mark the cpu_tlbstate _quickly_ to indicate that a future invalidation is
required.

At the next context-switch, look for this indicator ('invalidate_other'
being set) and invalidate all of the cpu_tlbstate.ctxs[] entries.

This ensures that any future context switches will do a full flush
of the TLB, picking up the previous changes.

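For illustration, a minimal stand-alone C model of the scheme follows. It
is a hypothetical sketch, not kernel code: plain globals stand in for the
per-cpu cpu_tlbstate, a printf stands in for the actual flush, and only
the names mirror the patch:

	/* Hypothetical user-space model of the scheme; not kernel code. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NR_DYN_ASIDS 6			/* ~6 dynamic ASIDs */

	static uint64_t ctx_id[NR_DYN_ASIDS];	/* 0 == flush before reuse */
	static uint16_t loaded_asid;		/* ASID currently loaded */
	static bool invalidate_other;

	/* The cheap part, done at invalidation time: set a flag, leave. */
	static void invalidate_other_asid(void)
	{
		invalidate_other = true;
	}

	/* The expensive part, deferred to the next context switch. */
	static void clear_asid_other(void)
	{
		for (uint16_t asid = 0; asid < NR_DYN_ASIDS; asid++) {
			if (asid == loaded_asid)
				continue;	/* current ctx stayed valid */
			ctx_id[asid] = 0;	/* force flush on next load */
		}
		invalidate_other = false;
	}

	static void context_switch(uint16_t next_asid)
	{
		if (invalidate_other)
			clear_asid_other();
		if (ctx_id[next_asid] == 0)
			printf("asid %u: full TLB flush\n", (unsigned)next_asid);
		ctx_id[next_asid] = 1;
		loaded_asid = next_asid;
	}

	int main(void)
	{
		ctx_id[0] = 1;			/* asid 0 is live */
		invalidate_other_asid();	/* page tables changed */
		context_switch(3);		/* flushes: asid 3 was cleared */
		context_switch(0);		/* no flush: asid 0 stayed
						   current and up to date */
		return 0;
	}

In the patch itself the flag and the ctxs[] array live in the per-cpu
cpu_tlbstate, and a zeroed ctx_id is treated as "never seen, needs a
flush" when choose_new_asid() next considers that slot.
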
[ tglx: Folded more fixups from Peter ]

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 2ea907c4fe7b78e5840c1dc07800eae93248cad1)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit fbb7e6e9e7e7cedecc164d660d08563f88103b56)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/tlbflush.h | 37 +++++++++++++++++++++++++++++--------
 arch/x86/mm/tlb.c               | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 503f87c30c15..3769ce182eac 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -124,6 +124,17 @@ struct tlb_state {
 	 */
 	bool is_lazy;
 
+	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
+	 */
+	bool invalidate_other;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
@@ -201,6 +212,14 @@ static inline unsigned long cr4_read_shadow(void)
 	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserves the current.
+ */
+static inline void invalidate_other_asid(void)
+{
+	this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of cr4 feature set we're using (e.g. Pentium 4MB
  * enable and PPro Global page enable), so that any CPU's that boot
@@ -287,14 +306,6 @@ static inline void __flush_tlb_all(void)
 		 */
 		__flush_tlb();
 	}
-
-	/*
-	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-	 * we'd end up flushing kernel translations for the current ASID but
-	 * we might fail to flush kernel translations for other cached ASIDs.
-	 *
-	 * To avoid this issue, we force PCID off if PGE is off.
-	 */
 }
 
 /*
@@ -304,6 +315,16 @@ static inline void __flush_tlb_one(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+	 * but since kernel space is replicated across all, we must also
+	 * invalidate all others.
+	 */
+	invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL	-1UL
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 87d4f961bcb4..ce87b69fb4e0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts. We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+	u16 asid;
+
+	/*
+	 * This is only expected to be set if we have disabled
+	 * kernel _PAGE_GLOBAL pages.
+	 */
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+		/* Do not need to flush the current asid */
+		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+			continue;
+		/*
+		 * Make sure the next time we go to switch to
+		 * this asid, we do a flush:
+		 */
+		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+	}
+	this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
@@ -43,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	if (this_cpu_read(cpu_tlbstate.invalidate_other))
+		clear_asid_other();
+
 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 		    next->context.ctx_id)
-- 
2.14.2