From 0a0e88a03210365fb82b7ab9cebfccca31aff608 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=
Date: Fri, 23 Jun 2017 08:25:20 +0200
Subject: [PATCH 1/4] Revert "mm: enlarge stack guard gap"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit fe388e5751e74b3534ee21d01b999795dfc83d39.

Signed-off-by: Fabian Grünbichler
---
 include/linux/mm.h   |  40 +++++++++++---
 arch/ia64/mm/fault.c |   2 +-
 fs/exec.c            |   8 +--
 fs/proc/task_mmu.c   |  11 ++--
 mm/gup.c             |   4 +-
 mm/memory.c          |  35 +++++++++++-
 mm/mmap.c            | 152 ++++++++++-----------------------------------
 7 files changed, 102 insertions(+), 150 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fbe65ceafb94..3978a350e9e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,11 +1366,39 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+					     unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+					   unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSUP) &&
+		(vma->vm_end == addr) &&
+		!vma_growsup(vma->vm_next, addr);
+}
+
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2111,22 +2139,16 @@ void page_cache_async_readahead(struct address_space *mapping,
 			pgoff_t offset,
 			unsigned long size);
 
-extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-extern int stack_guard_area(struct vm_area_struct *vma, unsigned long address);
 
 /* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
 extern int expand_downwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap);
-
+		unsigned long address);
 #if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address, gap) (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d5caa3cab925..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -224,7 +224,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		 */
 		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
 			goto bad_area;
-		if (expand_upwards(vma, address, 0))
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
diff --git a/fs/exec.c b/fs/exec.c
index 5b6383208379..1825e64f8bf3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,7 +205,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
-		ret = expand_downwards(bprm->vma, pos, 0);
+		ret = expand_downwards(bprm->vma, pos);
 		if (ret < 0)
 			return NULL;
 	}
@@ -227,12 +227,6 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
 		struct rlimit *rlim;
 
-		/*
-		 * GRWOSUP doesn't really have any gap at this stage because we grow
-		 * the stack down now. See the expand_downwards above.
-		 */
-		if (!IS_ENABLED(CONFIG_STACK_GROWSUP))
-			size -= stack_guard_gap;
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 75f9099e2ecf..52049b595b0f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -302,14 +302,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
+	if (stack_guard_page_start(vma, start))
+		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (stack_guard_area(vma, start))
-			start += stack_guard_gap;
-	} else if (vma->vm_flags & VM_GROWSUP) {
-		if (stack_guard_area(vma, end))
-			end -= stack_guard_gap;
-	}
+	if (stack_guard_page_end(vma, end))
+		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/mm/gup.c b/mm/gup.c
index 90252d1038b9..bb5f3d69f87e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -372,7 +372,9 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
 	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) && stack_guard_area(vma, address))
+	if ((*flags & FOLL_POPULATE) &&
+			(stack_guard_page_start(vma, address) ||
+			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
diff --git a/mm/memory.c b/mm/memory.c
index fca9dc75a04d..c89214451507 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2714,7 +2714,39 @@ int do_swap_page(struct vm_fault *vmf)
 	return ret;
 }
 
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+		return expand_downwards(vma, address - PAGE_SIZE);
+	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+		return expand_upwards(vma, address + PAGE_SIZE);
+	}
+	return 0;
+}
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
@@ -2733,8 +2765,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	/* Check if we need to add a guard page to the stack */
-	if ((vma->vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) &&
-			expand_stack(vma, vmf->address) < 0)
+	if (check_stack_guard_page(vma, vmf->address) < 0)
 		return VM_FAULT_SIGSEGV;
 
 	/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 2a3bdf11baf0..4b3a2aaa18e9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2151,8 +2151,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
-			     unsigned long gap)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
@@ -2165,7 +2164,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	/* Stack limit test */
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= gap;
+		actual_size -= PAGE_SIZE;
 	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
@@ -2201,7 +2200,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end. Have to extend vma.
  */
-int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
@@ -2209,6 +2208,12 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2229,7 +2234,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
 
 		error = -ENOMEM;
 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow, gap);
+			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
 				/*
 				 * vma_gap_update() doesn't support concurrent
@@ -2270,7 +2275,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
 int expand_downwards(struct vm_area_struct *vma,
-				   unsigned long address, unsigned long gap)
+				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
@@ -2300,7 +2305,7 @@ int expand_downwards(struct vm_area_struct *vma,
 
 		error = -ENOMEM;
 		if (grow <= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow, gap);
+			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
 				/*
 				 * vma_gap_update() doesn't support concurrent
@@ -2334,72 +2339,29 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
 #ifdef CONFIG_STACK_GROWSUP
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *next = vma->vm_next;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address = ALIGN(address, PAGE_SIZE);;
-	if (!next)
-		goto out;
-
-	if (next->vm_flags & VM_GROWSUP) {
-		guard_gap = min(guard_gap, next->vm_start - address);
-		goto out;
-	}
-
-	if (next->vm_start - address < guard_gap)
-		return -ENOMEM;
-out:
-	if (TASK_SIZE - address < guard_gap)
-		guard_gap = TASK_SIZE - address;
-	guard_addr = address + guard_gap;
-	*gap = guard_gap;
-
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_upwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *next;
 
-	if (!(vma->vm_flags & VM_GROWSUP))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	next = vma->vm_next;
-	if (next && next->vm_flags & VM_GROWSUP)
-		return 0;
-
-	return vma->vm_end - address <= stack_guard_gap;
+	if (next && next->vm_start == address + PAGE_SIZE) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+	}
+	return expand_upwards(vma, address);
 }
 
 struct vm_area_struct *
@@ -2418,73 +2380,17 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	return prev;
 }
 #else
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *prev = vma->vm_prev;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address &= PAGE_MASK;
-	if (!prev)
-		goto out;
-
-	/*
-	 * Is there a mapping abutting this one below?
-	 *
-	 * That's only ok if it's the same stack mapping
-	 * that has gotten split or there is sufficient gap
-	 * between mappings
-	 */
-	if (prev->vm_flags & VM_GROWSDOWN) {
-		guard_gap = min(guard_gap, address - prev->vm_end);
-		goto out;
-	}
-
-	if (address - prev->vm_end < guard_gap)
-		return -ENOMEM;
-
-out:
-	/* make sure we won't underflow */
-	if (address < mmap_min_addr)
-		return -ENOMEM;
-	if (address - mmap_min_addr < guard_gap)
-		guard_gap = address - mmap_min_addr;
-
-	guard_addr = address - guard_gap;
-	*gap = guard_gap;
-
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_downwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *prev;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	prev = vma->vm_prev;
-	if (prev && prev->vm_flags & VM_GROWSDOWN)
-		return 0;
-
-	return address - vma->vm_start < stack_guard_gap;
+	if (prev && prev->vm_end == address) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+	}
+	return expand_downwards(vma, address);
 }
 
 struct vm_area_struct *
-- 
2.11.0