/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
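
/*
 * Base addresses of the direct mapping, the vmalloc area and the vmemmap
 * array. These are variables rather than constants so that they can be
 * randomized at boot by KASLR (CONFIG_RANDOMIZE_MEMORY, arch/x86/mm/kaslr.c).
 */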
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
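
/*
 * Translate a kernel virtual address to a physical address, without the
 * sanity checks of CONFIG_DEBUG_VIRTUAL: kernel text addresses (at or
 * above __START_KERNEL_map) translate via phys_base, direct-map addresses
 * by subtracting PAGE_OFFSET. The unsigned comparison below detects the
 * borrow out of "x - __START_KERNEL_map" and selects the right offset.
 */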
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif
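
/*
 * With CONFIG_DEBUG_VIRTUAL, the out-of-line versions declared above
 * (arch/x86/mm/physaddr.c) additionally VIRTUAL_BUG_ON() addresses that
 * are not part of a valid kernel mapping; __pa() in <asm/page.h> expands
 * to __phys_addr(), so those checks apply to every __pa() call.
 */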

#define __phys_reloc_hide(x)	(x)
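
/*
 * __pa_symbol() in <asm/page.h> passes its argument through
 * __phys_reloc_hide() above; it is a no-op on 64-bit, while 32-bit
 * defines it as RELOC_HIDE() to hide symbol relocations from the
 * compiler.
 */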

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
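
/*
 * Zero a page. The three variants above live in
 * arch/x86/lib/clear_page_64.S: an unrolled store loop, "rep stosq"
 * (X86_FEATURE_REP_GOOD) and "rep stosb" (X86_FEATURE_ERMS);
 * alternative_call_2() below patches in the best one at boot.
 */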
static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "0" (page)
			   : "cc", "memory", "rax", "rcx");
}
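
/*
 * Implemented in arch/x86/lib/copy_page_64.S: "rep movsq" on CPUs with
 * X86_FEATURE_REP_GOOD, an unrolled register copy otherwise.
 */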
void copy_page(void *to, void *from);
#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
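
/*
 * Runtime-selected TASK_SIZE_MAX: (1 << 47) - PAGE_SIZE with 4-level
 * paging, (1 << 56) - PAGE_SIZE when the CPU runs with LA57 (5-level
 * paging). alternative_io() patches in the matching constant at boot, so
 * no conditional branch is executed at runtime.
 */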
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	alternative_io("movq %[small],%0","movq %[large],%0",
		       X86_FEATURE_LA57,
		       "=r" (ret),
		       [small] "i" ((1ul << 47)-PAGE_SIZE),
		       [large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}
#endif	/* CONFIG_X86_5LEVEL */

#endif	/* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif	/* _ASM_X86_PAGE_64_H */