From 9e6bc95ae1c4b92d9838ee8d2ee8b0e65f4e4469 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Fri, 27 Oct 2017 13:25:28 -0700
Subject: [PATCH 083/241] x86/mm: Relocate page fault error codes to traps.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Up to this point, only fault.c used the definitions of the page fault
error codes. Thus, it made sense to keep them within that file. Other
portions of code might be interested in those definitions too. For
instance, the User-Mode Instruction Prevention emulation code will use
these definitions to emulate a page fault when it is unable to
successfully copy the results of the emulated instructions to user space.

While relocating the error code enumeration, the prefix X86_ is used to
make it consistent with the rest of the definitions in traps.h. Of course,
code using the enumeration had to be updated as well. No functional changes
were performed.
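
Purely as an illustration of the relocated names (this sketch is not
part of the patch; the helper and its context are hypothetical), a user
outside fault.c, such as the UMIP emulation code mentioned above, could
now build the error code for a synthesized fault like this:

    #include <asm/traps.h>	/* enum x86_pf_error_code */

    /*
     * Hypothetical sketch: error code for an emulated user-mode write
     * to a not-present page. X86_PF_PROT stays clear (no page found),
     * while the user-mode and write-access bits are set.
     */
    static unsigned long emulated_pf_error_code(void)
    {
    	return X86_PF_USER | X86_PF_WRITE;
    }

Testing bits works the same way (error_code & X86_PF_INSTR, and so on),
as the conversions in fault.c below show.
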
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: ricardo.neri@intel.com
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: "Ravi V. Shankar" <ravi.v.shankar@intel.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com

(cherry picked from commit 1067f030994c69ca1fba8c607437c8895dcf8509)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit a85a07ab9111e3c78797c20b60a664dbd5db4981)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/traps.h | 18 +++++++++
 arch/x86/mm/fault.c          | 88 +++++++++++++++++---------------------------
 2 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index feb89dbe359d..8e5bf86f87e5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -162,4 +162,22 @@ enum {
 	X86_TRAP_IRET = 32,	/* 32, IRET Exception */
 };
 
+/*
+ * Page fault error code bits:
+ *
+ *   bit 0 ==	 0: no page found	1: protection fault
+ *   bit 1 ==	 0: read access		1: write access
+ *   bit 2 ==	 0: kernel-mode access	1: user-mode access
+ *   bit 3 ==				1: use of reserved bit detected
+ *   bit 4 ==				1: fault was an instruction fetch
+ *   bit 5 ==				1: protection keys block access
+ */
+enum x86_pf_error_code {
+	X86_PF_PROT	=	1 << 0,
+	X86_PF_WRITE	=	1 << 1,
+	X86_PF_USER	=	1 << 2,
+	X86_PF_RSVD	=	1 << 3,
+	X86_PF_INSTR	=	1 << 4,
+	X86_PF_PK	=	1 << 5,
+};
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4ee9eb916826..d3a57e7ad311 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,26 +28,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
-/*
- * Page fault error code bits:
- *
- *   bit 0 ==	 0: no page found	1: protection fault
- *   bit 1 ==	 0: read access		1: write access
- *   bit 2 ==	 0: kernel-mode access	1: user-mode access
- *   bit 3 ==				1: use of reserved bit detected
- *   bit 4 ==				1: fault was an instruction fetch
- *   bit 5 ==				1: protection keys block access
- */
-enum x86_pf_error_code {
-
-	PF_PROT		=	1 << 0,
-	PF_WRITE	=	1 << 1,
-	PF_USER		=	1 << 2,
-	PF_RSVD		=	1 << 3,
-	PF_INSTR	=	1 << 4,
-	PF_PK		=	1 << 5,
-};
-
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	 * If it was a exec (instruction fetch) fault on NX page, then
 	 * do not ignore the fault:
 	 */
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
 		return 0;
 
 	instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	 * siginfo so userspace can discover which protection key was set
 	 * on the PTE.
 	 *
-	 * If we get here, we know that the hardware signaled a PF_PK
+	 * If we get here, we know that the hardware signaled a X86_PF_PK
 	 * fault and that there was a VMA once we got in the fault
 	 * handler. It does *not* guarantee that the VMA we find here
 	 * was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
 	/*
 	 * force_sig_info_fault() is called from a number of
 	 * contexts, some of which have a VMA and some of which
-	 * do not. The PF_PK handing happens after we have a
+	 * do not. The X86_PF_PK handing happens after we have a
 	 * valid VMA, so we should never reach this without a
 	 * valid VMA.
 	 */
@@ -693,7 +673,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 	if (!oops_may_print())
 		return;
 
-	if (error_code & PF_INSTR) {
+	if (error_code & X86_PF_INSTR) {
 		unsigned int level;
 		pgd_t *pgd;
 		pte_t *pte;
@@ -775,7 +755,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	 */
 	if (current->thread.sig_on_uaccess_err && signal) {
 		tsk->thread.trap_nr = X86_TRAP_PF;
-		tsk->thread.error_code = error_code | PF_USER;
+		tsk->thread.error_code = error_code | X86_PF_USER;
 		tsk->thread.cr2 = address;
 
 		/* XXX: hwpoison faults will set the wrong code. */
@@ -894,7 +874,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	struct task_struct *tsk = current;
 
 	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
 		/*
 		 * It's possible to have interrupts off here:
 		 */
@@ -915,7 +895,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		 * Instruction fetch faults in the vsyscall page might need
 		 * emulation.
 		 */
-		if (unlikely((error_code & PF_INSTR) &&
+		if (unlikely((error_code & X86_PF_INSTR) &&
 			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
 			if (emulate_vsyscall(regs, address))
 				return;
@@ -928,7 +908,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		 * are always protection faults.
 		 */
 		if (address >= TASK_SIZE_MAX)
-			error_code |= PF_PROT;
+			error_code |= X86_PF_PROT;
 
 		if (likely(show_unhandled_signals))
 			show_signal_msg(regs, error_code, address, tsk);
@@ -989,11 +969,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return false;
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
 		return true;
 	/* this checks permission keys on the VMA: */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				(error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
 		return true;
 	return false;
 }
@@ -1021,7 +1001,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	int code = BUS_ADRERR;
 
 	/* Kernel mode? Handle exceptions or die: */
-	if (!(error_code & PF_USER)) {
+	if (!(error_code & X86_PF_USER)) {
 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 		return;
 	}
@@ -1049,14 +1029,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, u32 *pkey, unsigned int fault)
 {
-	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
 		no_context(regs, error_code, address, 0, 0);
 		return;
 	}
 
 	if (fault & VM_FAULT_OOM) {
 		/* Kernel mode? Handle exceptions or die: */
-		if (!(error_code & PF_USER)) {
+		if (!(error_code & X86_PF_USER)) {
 			no_context(regs, error_code, address,
 				   SIGSEGV, SEGV_MAPERR);
 			return;
@@ -1081,16 +1061,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
-	if ((error_code & PF_WRITE) && !pte_write(*pte))
+	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 		return 0;
 
-	if ((error_code & PF_INSTR) && !pte_exec(*pte))
+	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 		return 0;
 	/*
 	 * Note: We do not do lazy flushing on protection key
-	 * changes, so no spurious fault will ever set PF_PK.
+	 * changes, so no spurious fault will ever set X86_PF_PK.
 	 */
-	if ((error_code & PF_PK))
+	if ((error_code & X86_PF_PK))
 		return 1;
 
 	return 1;
@@ -1136,8 +1116,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
 	 * change, so user accesses are not expected to cause spurious
 	 * faults.
 	 */
-	if (error_code != (PF_WRITE | PF_PROT)
-	    && error_code != (PF_INSTR | PF_PROT))
+	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+	    error_code != (X86_PF_INSTR | X86_PF_PROT))
 		return 0;
 
 	pgd = init_mm.pgd + pgd_index(address);
@@ -1197,19 +1177,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	 * always an unconditional error and can never result in
 	 * a follow-up action to resolve the fault, like a COW.
 	 */
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
 		return 1;
 
 	/*
 	 * Make sure to check the VMA so that we do not perform
-	 * faults just to hit a PF_PK as soon as we fill in a
+	 * faults just to hit a X86_PF_PK as soon as we fill in a
 	 * page.
 	 */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				(error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
 		return 1;
 
-	if (error_code & PF_WRITE) {
+	if (error_code & X86_PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
@@ -1217,7 +1197,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	}
 
 	/* read, present: */
-	if (unlikely(error_code & PF_PROT))
+	if (unlikely(error_code & X86_PF_PROT))
 		return 1;
 
 	/* read, not present: */
@@ -1240,7 +1220,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 	if (!static_cpu_has(X86_FEATURE_SMAP))
 		return false;
 
-	if (error_code & PF_USER)
+	if (error_code & X86_PF_USER)
 		return false;
 
 	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1293,7 +1273,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * protection error (error_code & 9) == 0.
 	 */
 	if (unlikely(fault_in_kernel_space(address))) {
-		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
 			if (vmalloc_fault(address) >= 0)
 				return;
 
@@ -1321,7 +1301,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (unlikely(kprobes_fault(regs)))
 		return;
 
-	if (unlikely(error_code & PF_RSVD))
+	if (unlikely(error_code & X86_PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
 	if (unlikely(smap_violation(error_code, regs))) {
@@ -1347,7 +1327,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 	if (user_mode(regs)) {
 		local_irq_enable();
-		error_code |= PF_USER;
+		error_code |= X86_PF_USER;
 		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
@@ -1356,9 +1336,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (error_code & PF_WRITE)
+	if (error_code & X86_PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
 	/*
@@ -1378,7 +1358,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * space check, thus avoiding the deadlock:
 	 */
 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-		if ((error_code & PF_USER) == 0 &&
+		if (!(error_code & X86_PF_USER) &&
 		    !search_exception_tables(regs->ip)) {
 			bad_area_nosemaphore(regs, error_code, address, NULL);
 			return;
@@ -1405,7 +1385,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		bad_area(regs, error_code, address);
 		return;
 	}
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
 		/*
 		 * Accessing the stack below %sp is always a bug.
 		 * The large cushion allows instructions like enter
-- 
2.14.2