drop patches applied upstream

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht 2017-07-11 14:55:28 +02:00 committed by Dietmar Maurer
parent 638b9f828c
commit ea91ce10d6
14 changed files with 0 additions and 2622 deletions


@@ -1,461 +0,0 @@
From 0a0e88a03210365fb82b7ab9cebfccca31aff608 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
Date: Fri, 23 Jun 2017 08:25:20 +0200
Subject: [PATCH 1/4] Revert "mm: enlarge stack guard gap"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit fe388e5751e74b3534ee21d01b999795dfc83d39.
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
include/linux/mm.h | 40 +++++++++++---
arch/ia64/mm/fault.c | 2 +-
fs/exec.c | 8 +--
fs/proc/task_mmu.c | 11 ++--
mm/gup.c | 4 +-
mm/memory.c | 35 +++++++++++-
mm/mmap.c | 152 ++++++++++-----------------------------------------
7 files changed, 102 insertions(+), 150 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fbe65ceafb94..3978a350e9e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,11 +1366,39 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSUP) &&
+ (vma->vm_end == addr) &&
+ !vma_growsup(vma->vm_next, addr);
+}
+
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2111,22 +2139,16 @@ void page_cache_async_readahead(struct address_space *mapping,
pgoff_t offset,
unsigned long size);
-extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-extern int stack_guard_area(struct vm_area_struct *vma, unsigned long address);
/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
- unsigned long address, unsigned long gap);
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
- unsigned long address, unsigned long *gap);
-
+ unsigned long address);
#if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma,
- unsigned long address, unsigned long gap);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
- #define expand_upwards(vma, address, gap) (0)
+ #define expand_upwards(vma, address) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d5caa3cab925..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -224,7 +224,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
*/
if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
goto bad_area;
- if (expand_upwards(vma, address, 0))
+ if (expand_upwards(vma, address))
goto bad_area;
}
goto good_area;
diff --git a/fs/exec.c b/fs/exec.c
index 5b6383208379..1825e64f8bf3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,7 +205,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
#ifdef CONFIG_STACK_GROWSUP
if (write) {
- ret = expand_downwards(bprm->vma, pos, 0);
+ ret = expand_downwards(bprm->vma, pos);
if (ret < 0)
return NULL;
}
@@ -227,12 +227,6 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
struct rlimit *rlim;
- /*
- * GRWOSUP doesn't really have any gap at this stage because we grow
- * the stack down now. See the expand_downwards above.
- */
- if (!IS_ENABLED(CONFIG_STACK_GROWSUP))
- size -= stack_guard_gap;
acct_arg_size(bprm, size / PAGE_SIZE);
/*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 75f9099e2ecf..52049b595b0f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -302,14 +302,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
+ if (stack_guard_page_start(vma, start))
+ start += PAGE_SIZE;
end = vma->vm_end;
- if (vma->vm_flags & VM_GROWSDOWN) {
- if (stack_guard_area(vma, start))
- start += stack_guard_gap;
- } else if (vma->vm_flags & VM_GROWSUP) {
- if (stack_guard_area(vma, end))
- end -= stack_guard_gap;
- }
+ if (stack_guard_page_end(vma, end))
+ end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/mm/gup.c b/mm/gup.c
index 90252d1038b9..bb5f3d69f87e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -372,7 +372,9 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
/* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) && stack_guard_area(vma, address))
+ if ((*flags & FOLL_POPULATE) &&
+ (stack_guard_page_start(vma, address) ||
+ stack_guard_page_end(vma, address + PAGE_SIZE)))
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
diff --git a/mm/memory.c b/mm/memory.c
index fca9dc75a04d..c89214451507 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2714,7 +2714,39 @@ int do_swap_page(struct vm_fault *vmf)
return ret;
}
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+ struct vm_area_struct *prev = vma->vm_prev;
+
+ /*
+ * Is there a mapping abutting this one below?
+ *
+ * That's only ok if it's the same stack mapping
+ * that has gotten split..
+ */
+ if (prev && prev->vm_end == address)
+ return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+ return expand_downwards(vma, address - PAGE_SIZE);
+ }
+ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+ struct vm_area_struct *next = vma->vm_next;
+
+ /* As VM_GROWSDOWN but s/below/above/ */
+ if (next && next->vm_start == address + PAGE_SIZE)
+ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+ return expand_upwards(vma, address + PAGE_SIZE);
+ }
+ return 0;
+}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
@@ -2733,8 +2765,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
/* Check if we need to add a guard page to the stack */
- if ((vma->vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) &&
- expand_stack(vma, vmf->address) < 0)
+ if (check_stack_guard_page(vma, vmf->address) < 0)
return VM_FAULT_SIGSEGV;
/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 2a3bdf11baf0..4b3a2aaa18e9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2151,8 +2151,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
- unsigned long gap)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
@@ -2165,7 +2164,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
/* Stack limit test */
actual_size = size;
if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= gap;
+ actual_size -= PAGE_SIZE;
if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
@@ -2201,7 +2200,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
-int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
int error = 0;
@@ -2209,6 +2208,12 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
+ /* Guard against wrapping around to address 0. */
+ if (address < PAGE_ALIGN(address+4))
+ address = PAGE_ALIGN(address+4);
+ else
+ return -ENOMEM;
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2229,7 +2234,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
error = -ENOMEM;
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow, gap);
+ error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
@@ -2270,7 +2275,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
* vma is the first one with address < vma->vm_start. Have to extend vma.
*/
int expand_downwards(struct vm_area_struct *vma,
- unsigned long address, unsigned long gap)
+ unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
int error;
@@ -2300,7 +2305,7 @@ int expand_downwards(struct vm_area_struct *vma,
error = -ENOMEM;
if (grow <= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow, gap);
+ error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
@@ -2334,72 +2339,29 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
/*
* Note how expand_stack() refuses to expand the stack all the way to
* abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard area, after all
+ * a stack mapping. We want to leave room for a guard page, after all
* (the guard page itself is not added here, that is done by the
* actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
*/
#ifdef CONFIG_STACK_GROWSUP
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
- unsigned long address, unsigned long *gap)
-{
- struct vm_area_struct *next = vma->vm_next;
- unsigned long guard_gap = stack_guard_gap;
- unsigned long guard_addr;
-
- address = ALIGN(address, PAGE_SIZE);;
- if (!next)
- goto out;
-
- if (next->vm_flags & VM_GROWSUP) {
- guard_gap = min(guard_gap, next->vm_start - address);
- goto out;
- }
-
- if (next->vm_start - address < guard_gap)
- return -ENOMEM;
-out:
- if (TASK_SIZE - address < guard_gap)
- guard_gap = TASK_SIZE - address;
- guard_addr = address + guard_gap;
- *gap = guard_gap;
-
- return guard_addr;
-}
-
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- unsigned long gap;
-
- address = expandable_stack_area(vma, address, &gap);
- if (IS_ERR_VALUE(address))
- return -ENOMEM;
- return expand_upwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
struct vm_area_struct *next;
- if (!(vma->vm_flags & VM_GROWSUP))
- return 0;
-
- /*
- * strictly speaking there is a guard gap between disjoint stacks
- * but the gap is not canonical (it might be smaller) and it is
- * reasonably safe to assume that we can ignore that gap for stack
- * POPULATE or /proc/<pid>[s]maps purposes
- */
+ address &= PAGE_MASK;
next = vma->vm_next;
- if (next && next->vm_flags & VM_GROWSUP)
- return 0;
-
- return vma->vm_end - address <= stack_guard_gap;
+ if (next && next->vm_start == address + PAGE_SIZE) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ }
+ return expand_upwards(vma, address);
}
struct vm_area_struct *
@@ -2418,73 +2380,17 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
return prev;
}
#else
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
- unsigned long address, unsigned long *gap)
-{
- struct vm_area_struct *prev = vma->vm_prev;
- unsigned long guard_gap = stack_guard_gap;
- unsigned long guard_addr;
-
- address &= PAGE_MASK;
- if (!prev)
- goto out;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split or there is sufficient gap
- * between mappings
- */
- if (prev->vm_flags & VM_GROWSDOWN) {
- guard_gap = min(guard_gap, address - prev->vm_end);
- goto out;
- }
-
- if (address - prev->vm_end < guard_gap)
- return -ENOMEM;
-
-out:
- /* make sure we won't underflow */
- if (address < mmap_min_addr)
- return -ENOMEM;
- if (address - mmap_min_addr < guard_gap)
- guard_gap = address - mmap_min_addr;
-
- guard_addr = address - guard_gap;
- *gap = guard_gap;
-
- return guard_addr;
-}
-
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- unsigned long gap;
-
- address = expandable_stack_area(vma, address, &gap);
- if (IS_ERR_VALUE(address))
- return -ENOMEM;
- return expand_downwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
struct vm_area_struct *prev;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return 0;
-
- /*
- * strictly speaking there is a guard gap between disjoint stacks
- * but the gap is not canonical (it might be smaller) and it is
- * reasonably safe to assume that we can ignore that gap for stack
- * POPULATE or /proc/<pid>[s]maps purposes
- */
+ address &= PAGE_MASK;
prev = vma->vm_prev;
- if (prev && prev->vm_flags & VM_GROWSDOWN)
- return 0;
-
- return address - vma->vm_start < stack_guard_gap;
+ if (prev && prev->vm_end == address) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ }
+ return expand_downwards(vma, address);
}
struct vm_area_struct *
--
2.11.0


@@ -1,48 +0,0 @@
From 0bfadbc4942a14d702d781c5b6a00ec747f4ed09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
Date: Fri, 23 Jun 2017 08:25:04 +0200
Subject: [PATCH 2/4] Revert "mm: do not collapse stack gap into THP"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit e9dbbeb2e0b61881d67ba7818fd4b3f996a35f0b.
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
mm/huge_memory.c | 3 ---
mm/khugepaged.c | 4 ----
2 files changed, 7 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 75719aa0443a..49cb70b5993d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -660,9 +660,6 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return VM_FAULT_FALLBACK;
- if (stack_guard_area(vma, haddr) ||
- stack_guard_area(vma, haddr + HPAGE_PMD_SIZE))
- return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 16379e5943a6..77ae3239c3de 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -859,10 +859,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_ADDRESS_RANGE;
if (!hugepage_vma_check(vma))
return SCAN_VMA_CHECK;
-
- /* never try to collapse stack gap */
- if (stack_guard_area(vma, hstart) || stack_guard_area(vma, hend))
- return SCAN_ADDRESS_RANGE;
return 0;
}
--
2.11.0


@@ -1,940 +0,0 @@
From f2ca8eceb0ddcdb6b113b3edd3dfd36c171854f6 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Mon, 19 Jun 2017 04:03:24 -0700
Subject: [PATCH 3/4] mm: larger stack guard gap, between vmas
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
commit 1be7107fbe18eed3e319a6c3e83c78254b693acb upstream.
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
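For illustration, assuming a 4kB page size: the default of 256 pages yields a
1MB gap, while booting with

	stack_guard_gap=1024

on the kernel command line widens it to 4MB, and stack_guard_gap=1 effectively
falls back to a single guard page.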
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
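A toy userspace model of the new placement rule (plain unsigned longs, not the
kernel's types; the real helper is in the include/linux/mm.h hunk below): a
mapping hint that used to be accepted merely for stopping short of a grow-down
stack must now also clear stack_guard_gap below it.

	#include <stdio.h>

	/* toy model, not kernel code: addresses as plain unsigned longs */
	static unsigned long stack_guard_gap = 256UL << 12;	/* 256 pages of 4kB = 1MB */

	static unsigned long vm_start_gap(unsigned long vm_start, int grows_down)
	{
		if (grows_down) {
			unsigned long gapped = vm_start - stack_guard_gap;
			if (gapped > vm_start)	/* underflow check, as in the real helper */
				return 0;
			return gapped;
		}
		return vm_start;
	}

	int main(void)
	{
		unsigned long stack_start = 0x7ffffff00000UL;	/* bottom of a grow-down stack */
		unsigned long addr = stack_start - (64UL << 12);	/* hint 256kB below it */
		unsigned long len  = 4096;

		/* old check: only had to stop short of the stack vma itself */
		printf("old check passes: %d\n", addr + len <= stack_start);
		/* new check: must also clear the 1MB guard gap below the stack */
		printf("new check passes: %d\n", addr + len <= vm_start_gap(stack_start, 1));
		return 0;
	}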
Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[wt: backport to 4.11: adjust context]
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[fg: backport to 4.10: adjust context]
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
Documentation/admin-guide/kernel-parameters.txt | 7 ++
include/linux/mm.h | 53 ++++-----
arch/arc/mm/mmap.c | 2 +-
arch/arm/mm/mmap.c | 4 +-
arch/frv/mm/elf-fdpic.c | 2 +-
arch/mips/mm/mmap.c | 2 +-
arch/parisc/kernel/sys_parisc.c | 15 ++-
arch/powerpc/mm/hugetlbpage-radix.c | 2 +-
arch/powerpc/mm/mmap.c | 4 +-
arch/powerpc/mm/slice.c | 2 +-
arch/s390/mm/mmap.c | 4 +-
arch/sh/mm/mmap.c | 4 +-
arch/sparc/kernel/sys_sparc_64.c | 4 +-
arch/sparc/mm/hugetlbpage.c | 2 +-
arch/tile/mm/hugetlbpage.c | 2 +-
arch/x86/kernel/sys_x86_64.c | 4 +-
arch/x86/mm/hugetlbpage.c | 2 +-
arch/xtensa/kernel/syscall.c | 2 +-
fs/hugetlbfs/inode.c | 2 +-
fs/proc/task_mmu.c | 4 -
mm/gup.c | 5 -
mm/memory.c | 38 ------
mm/mmap.c | 149 ++++++++++++++----------
23 files changed, 152 insertions(+), 163 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d034a76bcae6..9eb4da6e980a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3744,6 +3744,13 @@
spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+ to (for stacks growing down) resp. after (for stacks
+ growing up) the main stack are reserved for no other
+ mapping. Default value is 256 pages.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3978a350e9e4..705bf34d0805 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,39 +1366,11 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2139,6 +2111,7 @@ void page_cache_async_readahead(struct address_space *mapping,
pgoff_t offset,
unsigned long size);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2167,6 +2140,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353caa35b9..641334ebf46d 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d08ea3ff0f53..a44052c05f93 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index bf3294171230..5fb3f0ef2906 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a678456..a2b2d97f7eda 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 2f1e44362198..5bc2845cddf4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458902ee..c4d5c9c61e0f 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df2822da1..812368f274c9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -98,7 +98,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -136,7 +136,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 884c70331345..f39dd87b77c4 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8b1b80..58cde8d9be8a 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -116,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa343fce..67508b249ede 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a55ed63b9f91..1119414ab419 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -140,7 +140,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2ae8584b44c7..fe342e8ed529 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index d3fd100dffc9..dceb0ee37db3 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 54de77e78775..19a787eb8a00 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 52049b595b0f..5e6bab59e219 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -302,11 +302,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/mm/gup.c b/mm/gup.c
index bb5f3d69f87e..bdd74b782b6e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -371,11 +371,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
- /* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) &&
- (stack_guard_page_start(vma, address) ||
- stack_guard_page_end(vma, address + PAGE_SIZE)))
- return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
diff --git a/mm/memory.c b/mm/memory.c
index c89214451507..6d54a9b15520 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2715,40 +2715,6 @@ int do_swap_page(struct vm_fault *vmf)
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2764,10 +2730,6 @@ static int do_anonymous_page(struct vm_fault *vmf)
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, vmf->address) < 0)
- return VM_FAULT_SIGSEGV;
-
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index 4b3a2aaa18e9..4acc20fc5c81 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
@@ -228,7 +229,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -251,10 +253,22 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -350,7 +364,7 @@ static void validate_mm(struct mm_struct *mm)
anon_vma_unlock_read(anon_vma);
}
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -539,7 +553,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -854,7 +868,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -939,7 +953,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
- VM_WARN_ON(mm->highest_vm_end != end);
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
@@ -1783,7 +1797,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1794,7 +1808,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
@@ -1821,8 +1835,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1886,7 +1900,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1899,7 +1913,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit && gap_end - gap_start >= length)
@@ -1925,7 +1939,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1963,7 +1977,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1974,9 +1988,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -1999,7 +2014,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -2014,9 +2029,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2151,21 +2167,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2203,17 +2217,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
+ address &= PAGE_MASK;
+ address += PAGE_SIZE;
+ if (!address)
return -ENOMEM;
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+ if (gap_addr < address)
+ return -ENOMEM;
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2257,7 +2284,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = address;
+ mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2278,6 +2305,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
address &= PAGE_MASK;
@@ -2285,6 +2314,17 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2339,28 +2379,25 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2382,14 +2419,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2487,7 +2516,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
--
2.11.0


@@ -1,54 +0,0 @@
From e9a8b9465f735bda98313627c38300796694f188 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Tue, 20 Jun 2017 02:10:44 -0700
Subject: [PATCH 4/4] mm: fix new crash in unmapped_area_topdown()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Trinity gets kernel BUG at mm/mmap.c:1963! in about 3 minutes of
mmap testing. That's the VM_BUG_ON(gap_end < gap_start) at the
end of unmapped_area_topdown(). Linus points out how MAP_FIXED
(which does not have to respect our stack guard gap intentions)
could result in gap_end below gap_start there. Fix that, and
the similar case in its alternative, unmapped_area().
Cc: stable@vger.kernel.org
Fixes: 1be7107fbe18 ("mm: larger stack guard gap, between vmas")
Reported-by: Dave Jones <davej@codemonkey.org.uk>
Debugged-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
mm/mmap.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 4acc20fc5c81..ece0e5a2a25b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1813,7 +1813,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
@@ -1916,7 +1917,8 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
--
2.11.0


@@ -1,54 +0,0 @@
From fa0ad0349d68f7e86419922266aa48de3eb2c507 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Tue, 6 Jun 2017 18:16:00 +0200
Subject: [PATCH] dccp/tcp: do not inherit mc_list from parent
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
syzkaller found a way to trigger double frees from ip_mc_drop_socket()
It turns out that we leave a copy of the parent's mc_list at accept() time,
which is very bad.
Very similar to commit 8b485ce69876 ("tcp: do not inherit
fastopen_req from parent")
Initial report from Pray3r, completed by Andrey Konovalov.
Thanks a lot to them!
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Pray3r <pray3r.z@gmail.com>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CVE-2017-8890
(cherry-picked from 657831ffc38e30092a2d5f03d385d710eb88b09a)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin Ian King <colin.king@canonical.com>
Acked-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/ipv4/inet_connection_sock.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19ea045c50ed..d952cfa15737 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -669,6 +669,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
/* listeners have SOCK_RCU_FREE, not the children */
sock_reset_flag(newsk, SOCK_RCU_FREE);
+ inet_sk(newsk)->mc_list = NULL;
+
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
--
2.11.0


@@ -1,96 +0,0 @@
From 7dd7eb9513bd02184d45f000ab69d78cb1fa1531 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Wed, 17 May 2017 22:54:11 -0400
Subject: [PATCH] ipv6: Check ip6_find_1stfragopt() return value properly.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Do not use unsigned variables to see if it returns a negative
error or not.
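As a minimal standalone sketch of the bug class (hypothetical names, plain
userspace C rather than the kernel code): once the return value lands in an
unsigned variable, the sign test below it can never fire, which is what the
hunks below avoid by checking a signed temporary first.

	#include <stdio.h>

	static int find_1stfragopt(void)	/* stand-in that always fails */
	{
		return -22;			/* e.g. -EINVAL */
	}

	int main(void)
	{
		unsigned int hlen = find_1stfragopt();
		if (hlen < 0)			/* always false: hlen is unsigned */
			puts("error caught (never reached)");
		else
			printf("error missed, hlen = %u\n", hlen);

		int err = find_1stfragopt();	/* signed temporary, as in the fix */
		if (err < 0)
			puts("error caught");
		return 0;
	}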
Fixes: 2423496af35d ("ipv6: Prevent overrun when parsing v6 header options")
Reported-by: Julia Lawall <julia.lawall@lip6.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/ipv6/ip6_offload.c | 9 ++++-----
net/ipv6/ip6_output.c | 7 +++----
net/ipv6/udp_offload.c | 8 +++++---
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index eab36abc9f22..280268f1dd7b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
const struct net_offload *ops;
int proto;
struct frag_hdr *fptr;
- unsigned int unfrag_ip6hlen;
unsigned int payload_len;
u8 *prevhdr;
int offset = 0;
@@ -116,10 +115,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) {
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- if (unfrag_ip6hlen < 0)
- return ERR_PTR(unfrag_ip6hlen);
- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset);
if (skb->next)
fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 01deecda2f84..d4a31becbd25 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -597,11 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
- hlen = ip6_find_1stfragopt(skb, &prevhdr);
- if (hlen < 0) {
- err = hlen;
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
goto fail;
- }
+ hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b348cff47395..a2267f80febb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
+ int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -90,9 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- if (unfrag_ip6hlen < 0)
- return ERR_PTR(unfrag_ip6hlen);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
--
2.11.0


@@ -1,244 +0,0 @@
From 1b56ac5ee17e975a07740b4c865918984ec14b52 Mon Sep 17 00:00:00 2001
From: Craig Gallek <kraig@google.com>
Date: Tue, 16 May 2017 14:36:23 -0400
Subject: [PATCH] ipv6: Prevent overrun when parsing v6 header options
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The KASAN warning reported below was discovered with a syzkaller
program. The reproducer is basically:
  int s = socket(AF_INET6, SOCK_RAW, NEXTHDR_HOP);
  send(s, &one_byte_of_data, 1, MSG_MORE);
  send(s, &more_than_mtu_bytes_data, 2000, 0);
The socket() call sets the nexthdr field of the v6 header to
NEXTHDR_HOP, the first send call primes the payload with a non zero
byte of data, and the second send call triggers the fragmentation path.
The fragmentation code tries to parse the header options in order
to figure out where to insert the fragment option. Since nexthdr points
to an invalid option, the calculation of the size of the network header
can be made much larger than the linear section of the skb and data
is read outside of it.
This fix makes ip6_find_1stfragopt() return an error if it detects
running out-of-bounds.
[ 42.361487] ==================================================================
[ 42.364412] BUG: KASAN: slab-out-of-bounds in ip6_fragment+0x11c8/0x3730
[ 42.365471] Read of size 840 at addr ffff88000969e798 by task ip6_fragment-oo/3789
[ 42.366469]
[ 42.366696] CPU: 1 PID: 3789 Comm: ip6_fragment-oo Not tainted 4.11.0+ #41
[ 42.367628] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.1-1ubuntu1 04/01/2014
[ 42.368824] Call Trace:
[ 42.369183] dump_stack+0xb3/0x10b
[ 42.369664] print_address_description+0x73/0x290
[ 42.370325] kasan_report+0x252/0x370
[ 42.370839] ? ip6_fragment+0x11c8/0x3730
[ 42.371396] check_memory_region+0x13c/0x1a0
[ 42.371978] memcpy+0x23/0x50
[ 42.372395] ip6_fragment+0x11c8/0x3730
[ 42.372920] ? nf_ct_expect_unregister_notifier+0x110/0x110
[ 42.373681] ? ip6_copy_metadata+0x7f0/0x7f0
[ 42.374263] ? ip6_forward+0x2e30/0x2e30
[ 42.374803] ip6_finish_output+0x584/0x990
[ 42.375350] ip6_output+0x1b7/0x690
[ 42.375836] ? ip6_finish_output+0x990/0x990
[ 42.376411] ? ip6_fragment+0x3730/0x3730
[ 42.376968] ip6_local_out+0x95/0x160
[ 42.377471] ip6_send_skb+0xa1/0x330
[ 42.377969] ip6_push_pending_frames+0xb3/0xe0
[ 42.378589] rawv6_sendmsg+0x2051/0x2db0
[ 42.379129] ? rawv6_bind+0x8b0/0x8b0
[ 42.379633] ? _copy_from_user+0x84/0xe0
[ 42.380193] ? debug_check_no_locks_freed+0x290/0x290
[ 42.380878] ? ___sys_sendmsg+0x162/0x930
[ 42.381427] ? rcu_read_lock_sched_held+0xa3/0x120
[ 42.382074] ? sock_has_perm+0x1f6/0x290
[ 42.382614] ? ___sys_sendmsg+0x167/0x930
[ 42.383173] ? lock_downgrade+0x660/0x660
[ 42.383727] inet_sendmsg+0x123/0x500
[ 42.384226] ? inet_sendmsg+0x123/0x500
[ 42.384748] ? inet_recvmsg+0x540/0x540
[ 42.385263] sock_sendmsg+0xca/0x110
[ 42.385758] SYSC_sendto+0x217/0x380
[ 42.386249] ? SYSC_connect+0x310/0x310
[ 42.386783] ? __might_fault+0x110/0x1d0
[ 42.387324] ? lock_downgrade+0x660/0x660
[ 42.387880] ? __fget_light+0xa1/0x1f0
[ 42.388403] ? __fdget+0x18/0x20
[ 42.388851] ? sock_common_setsockopt+0x95/0xd0
[ 42.389472] ? SyS_setsockopt+0x17f/0x260
[ 42.390021] ? entry_SYSCALL_64_fastpath+0x5/0xbe
[ 42.390650] SyS_sendto+0x40/0x50
[ 42.391103] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.391731] RIP: 0033:0x7fbbb711e383
[ 42.392217] RSP: 002b:00007ffff4d34f28 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 42.393235] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbbb711e383
[ 42.394195] RDX: 0000000000001000 RSI: 00007ffff4d34f60 RDI: 0000000000000003
[ 42.395145] RBP: 0000000000000046 R08: 00007ffff4d34f40 R09: 0000000000000018
[ 42.396056] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000400aad
[ 42.396598] R13: 0000000000000066 R14: 00007ffff4d34ee0 R15: 00007fbbb717af00
[ 42.397257]
[ 42.397411] Allocated by task 3789:
[ 42.397702] save_stack_trace+0x16/0x20
[ 42.398005] save_stack+0x46/0xd0
[ 42.398267] kasan_kmalloc+0xad/0xe0
[ 42.398548] kasan_slab_alloc+0x12/0x20
[ 42.398848] __kmalloc_node_track_caller+0xcb/0x380
[ 42.399224] __kmalloc_reserve.isra.32+0x41/0xe0
[ 42.399654] __alloc_skb+0xf8/0x580
[ 42.400003] sock_wmalloc+0xab/0xf0
[ 42.400346] __ip6_append_data.isra.41+0x2472/0x33d0
[ 42.400813] ip6_append_data+0x1a8/0x2f0
[ 42.401122] rawv6_sendmsg+0x11ee/0x2db0
[ 42.401505] inet_sendmsg+0x123/0x500
[ 42.401860] sock_sendmsg+0xca/0x110
[ 42.402209] ___sys_sendmsg+0x7cb/0x930
[ 42.402582] __sys_sendmsg+0xd9/0x190
[ 42.402941] SyS_sendmsg+0x2d/0x50
[ 42.403273] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.403718]
[ 42.403871] Freed by task 1794:
[ 42.404146] save_stack_trace+0x16/0x20
[ 42.404515] save_stack+0x46/0xd0
[ 42.404827] kasan_slab_free+0x72/0xc0
[ 42.405167] kfree+0xe8/0x2b0
[ 42.405462] skb_free_head+0x74/0xb0
[ 42.405806] skb_release_data+0x30e/0x3a0
[ 42.406198] skb_release_all+0x4a/0x60
[ 42.406563] consume_skb+0x113/0x2e0
[ 42.406910] skb_free_datagram+0x1a/0xe0
[ 42.407288] netlink_recvmsg+0x60d/0xe40
[ 42.407667] sock_recvmsg+0xd7/0x110
[ 42.408022] ___sys_recvmsg+0x25c/0x580
[ 42.408395] __sys_recvmsg+0xd6/0x190
[ 42.408753] SyS_recvmsg+0x2d/0x50
[ 42.409086] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.409513]
[ 42.409665] The buggy address belongs to the object at ffff88000969e780
[ 42.409665] which belongs to the cache kmalloc-512 of size 512
[ 42.410846] The buggy address is located 24 bytes inside of
[ 42.410846] 512-byte region [ffff88000969e780, ffff88000969e980)
[ 42.411941] The buggy address belongs to the page:
[ 42.412405] page:ffffea000025a780 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
[ 42.413298] flags: 0x100000000008100(slab|head)
[ 42.413729] raw: 0100000000008100 0000000000000000 0000000000000000 00000001800c000c
[ 42.414387] raw: ffffea00002a9500 0000000900000007 ffff88000c401280 0000000000000000
[ 42.415074] page dumped because: kasan: bad access detected
[ 42.415604]
[ 42.415757] Memory state around the buggy address:
[ 42.416222] ffff88000969e880: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.416904] ffff88000969e900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.417591] >ffff88000969e980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 42.418273] ^
[ 42.418588] ffff88000969ea00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419273] ffff88000969ea80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419882] ==================================================================
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CVE-2017-9074
(cherry-picked from 2423496af35d94a87156b063ea5cedffc10a70a1)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin King <colin.king@canonical.com>
Acked-by: Andy Whitcroft <andy.whitcroft@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/ipv6/ip6_offload.c | 2 ++
net/ipv6/ip6_output.c | 4 ++++
net/ipv6/output_core.c | 14 ++++++++------
net/ipv6/udp_offload.c | 2 ++
4 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 33b04ec2744a..9881a87696bc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -117,6 +117,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (udpfrag) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ if (unfrag_ip6hlen < 0)
+ return ERR_PTR(unfrag_ip6hlen);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
if (skb->next)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d57f4ee5ec29..8f0814d301aa 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -597,6 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
u8 *prevhdr, nexthdr = 0;
hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ if (hlen < 0) {
+ err = hlen;
+ goto fail;
+ }
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd4252346a32..e9065b8d3af8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
- while (offset + 1 <= packet_len) {
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
+ offset += ipv6_optlen(exthdr);
+ *nexthdr = &exthdr->nexthdr;
}
- return offset;
+ return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c480f2f..b348cff47395 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -91,6 +91,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ if (unfrag_ip6hlen < 0)
+ return ERR_PTR(unfrag_ip6hlen);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
--
2.11.0

View File

@ -1,46 +0,0 @@
From eaa390df0e82e771601fa68482c4f022674e904f Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Wed, 17 May 2017 07:16:40 -0700
Subject: [PATCH] sctp: do not inherit ipv6_{mc|ac|fl}_list from parent
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
SCTP needs fixes similar to 83eaddab4378 ("ipv6/dccp: do not inherit
ipv6_mc_list from parent"), otherwise bad things can happen.
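This fix and the related dccp/tcp and ipv6/dccp ones follow the same pattern: after the
parent's ipv6_pinfo is copied into the accepted child socket, every list pointer that must
not be shared is reset before the child becomes reachable. A minimal stand-alone sketch of
that pattern, using an illustrative struct rather than the kernel's ipv6_pinfo:

    #include <string.h>

    struct ipv6_state_sketch {
        void *mc_list;    /* multicast memberships (must not be shared) */
        void *ac_list;    /* anycast memberships   (must not be shared) */
        void *fl_list;    /* flow labels           (must not be shared) */
        int   hop_limit;  /* plain settings may simply be copied        */
    };

    /* Copy the parent's settings wholesale, then drop every pointer that
     * would otherwise alias state owned by the parent socket. */
    void clone_ipv6_state(struct ipv6_state_sketch *child,
                          const struct ipv6_state_sketch *parent)
    {
        memcpy(child, parent, sizeof(*child));
        child->mc_list = NULL;
        child->ac_list = NULL;
        child->fl_list = NULL;
    }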
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CVE-2017-9075
(cherry-picked from fdcee2cbb8438702ea1b328fb6e0ac5e9a40c7f8)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin King <colin.king@canonical.com>
Acked-by: Andy Whitcroft <andy.whitcroft@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/sctp/ipv6.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 64dfd35ccdcc..ef0ed6bb71e9 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -666,6 +666,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
rcu_read_lock();
opt = rcu_dereference(np->opt);
--
2.11.0

View File

@ -1,78 +0,0 @@
From ef8ae9e80ab0846763c6405968852e19c9a87782 Mon Sep 17 00:00:00 2001
From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Wed, 7 Jun 2017 12:28:27 +0200
Subject: [PATCH] ipv6/dccp: do not inherit ipv6_mc_list from parent
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Like commit 657831ffc38e ("dccp/tcp: do not inherit mc_list from parent")
we should clear ipv6_mc_list etc. for IPv6 sockets too.
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CVE-2017-9076
CVE-2017-9077
(cherry-picked from 83eaddab4378db256d00d295bda6ca997cd13a52)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin Ian King <colin.king@canonical.com>
Acked-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/dccp/ipv6.c | 6 ++++++
net/ipv6/tcp_ipv6.c | 2 ++
2 files changed, 8 insertions(+)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 2f3e8bbe2cb9..8f41327c1edf 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cfc232714139..c0ca1218801b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1055,6 +1055,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
@@ -1124,6 +1125,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
--
2.11.0

View File

@ -1,79 +0,0 @@
From 2bb2c56a2089211713a7b1fb379cce741f65ea9e Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Fri, 19 May 2017 14:17:48 -0700
Subject: [PATCH] ipv6: fix out of bound writes in __ip6_append_data()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Andrey Konovalov and idaifish@gmail.com reported crashes caused by
one skb shared_info being overwritten from __ip6_append_data()
Andrey's program led to the following state:
copy -4200 datalen 2000 fraglen 2040
maxfraglen 2040 alloclen 2048 transhdrlen 0 offset 0 fraggap 6200
The skb_copy_and_csum_bits(skb_prev, maxfraglen, data + transhdrlen,
fraggap, 0) call overwrites skb->head and the skb_shared_info.
Since we apparently detect this rare condition too late, move the
check earlier to avoid even allocating the skb and risking crashes.
Once again, many thanks to Andrey and the syzkaller team.
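The essence of the change is ordering: compute and validate the copy length before any
allocation, so the error path never has to free a partially built skb. A tiny stand-alone
C sketch of that ordering; the function name and parameters are illustrative, not the
kernel's:

    #include <stdlib.h>

    /* Return a payload buffer of (datalen - transhdrlen - fraggap) bytes,
     * or NULL when that length is negative.  The length test runs before
     * malloc(), mirroring how the fix moves the 'copy < 0' check ahead of
     * sock_alloc_send_skb(). */
    void *alloc_fragment_payload(int datalen, int transhdrlen, int fraggap)
    {
        int copy = datalen - transhdrlen - fraggap;

        if (copy < 0)
            return NULL;    /* invalid geometry: fail before allocating */

        return malloc((size_t)copy);
    }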
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Reported-by: <idaifish@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CVE-2017-9242
(cherry-picked from 232cd35d0804cc241eb887bb8d4d9b3b9881c64a)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin King <colin.king@canonical.com>
Acked-by: Andy Whitcroft <andy.whitcroft@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/ipv6/ip6_output.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8f0814d301aa..135ee573eaef 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1463,6 +1463,11 @@ static int __ip6_append_data(struct sock *sk,
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap;
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
@@ -1512,13 +1517,9 @@ static int __ip6_append_data(struct sock *sk,
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
--
2.11.0

View File

@ -233,20 +233,7 @@ ${KERNEL_SRC}/README ${KERNEL_CFG_ORG}: ${KERNEL_SRC_SUBMODULE} | submodules
#cd ${KERNEL_SRC}; patch -p1 <../vhost-net-extend-device-allocation-to-vmalloc.patch
cd ${KERNEL_SRC}; patch -p1 < ../kvm-dynamic-halt-polling-disable-default.patch
cd ${KERNEL_SRC}; patch -p1 < ../cgroup-cpuset-add-cpuset.remap_cpus.patch
cd ${KERNEL_SRC}; patch -p1 < ../openvswitch-Set-internal-device-max-mtu-to-ETH_MAX_M.patch
cd ${KERNEL_SRC}; patch -p1 < ../0001-netfilter-nft_set_rbtree-handle-re-addition-element-.patch # DoS from within (unpriv) containers
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-8890-dccp-tcp-do-not-inherit-mc_list-from-parent.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-9074-ipv6-Prevent-overrun-when-parsing-v6-header-options.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-9074-2-ipv6-Check-ip6_find_1stfragopt-return-value-properly.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-9075-sctp-do-not-inherit-ipv6_-mc-ac-fl-_list-from-parent.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-9076_9077-ipv6-dccp-do-not-inherit-ipv6_mc_list-from-parent.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-9242-ipv6-fix-out-of-bound-writes-in-__ip6_append_data.patch
cd ${KERNEL_SRC}; patch -p1 < ../pinctl-amd-ryzen-01-make-use-of-raw_spinlock-variants.patch
cd ${KERNEL_SRC}; patch -p1 < ../pinctl-amd-ryzen-02-Use-regular-interrupt-instead-of-chained.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-100364-0001-Revert-mm-enlarge-stack-guard-gap.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-100364-0002-Revert-mm-do-not-collapse-stack-gap-into-THP.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-100364-0003-mm-larger-stack-guard-gap-between-vmas.patch
cd ${KERNEL_SRC}; patch -p1 < ../CVE-2017-100364-0004-mm-fix-new-crash-in-unmapped_area_topdown.patch
sed -i ${KERNEL_SRC}/Makefile -e 's/^EXTRAVERSION.*$$/EXTRAVERSION=${EXTRAVERSION}/'
touch $@

View File

@ -1,41 +0,0 @@
From 425df17ce3a26d98f76e2b6b0af2acf4aeb0b026 Mon Sep 17 00:00:00 2001
From: Jarno Rajahalme <jarno@ovn.org>
Date: Tue, 14 Feb 2017 21:16:28 -0800
Subject: [PATCH] openvswitch: Set internal device max mtu to ETH_MAX_MTU.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Commit 91572088e3fd ("net: use core MTU range checking in core net
infra") changed the openvswitch internal device to use the core net
infra for controlling the MTU range, but failed to actually set the
max_mtu as described in the commit message, which now defaults to
ETH_DATA_LEN.
This patch fixes this by setting max_mtu to ETH_MAX_MTU after
the ether_setup() call.
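Since ether_setup() initializes the device with Ethernet defaults (max_mtu = ETH_DATA_LEN),
a driver that should accept larger MTUs has to widen the range afterwards. A toy
stand-alone sketch of that ordering, with a stand-in struct and constants instead of
struct net_device and the real macros:

    #define ETH_DATA_LEN_SKETCH 1500    /* default upper bound after setup */
    #define ETH_MAX_MTU_SKETCH  65535   /* full 16-bit range the fix allows */

    struct netdev_sketch {
        int min_mtu;
        int max_mtu;
    };

    static void ether_setup_sketch(struct netdev_sketch *dev)
    {
        dev->min_mtu = 68;
        dev->max_mtu = ETH_DATA_LEN_SKETCH;
    }

    void internal_dev_setup_sketch(struct netdev_sketch *dev)
    {
        ether_setup_sketch(dev);
        dev->max_mtu = ETH_MAX_MTU_SKETCH;  /* the fix: raise it after setup */
    }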
Fixes: 91572088e3fd ("net: use core MTU range checking in core net infra")
Signed-off-by: Jarno Rajahalme <jarno@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
net/openvswitch/vport-internal_dev.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 09141a1..89193a6 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -149,6 +149,8 @@ static void do_setup(struct net_device *netdev)
{
ether_setup(netdev);
+ netdev->max_mtu = ETH_MAX_MTU;
+
netdev->netdev_ops = &internal_dev_netdev_ops;
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
--
2.1.4

View File

@ -1,303 +0,0 @@
From 78388cbea036c9a9e2fd0c71e21d608cfc63939a Mon Sep 17 00:00:00 2001
From: Julia Cartwright <julia@ni.com>
Date: Thu, 1 Jun 2017 13:12:16 +0800
Subject: [PATCH] pinctrl: amd: make use of raw_spinlock variants
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
BugLink: https://bugs.launchpad.net/bugs/1671360
The amd pinctrl drivers currently implement an irq_chip for handling
interrupts; due to how irq_chip handling is done, it's necessary for the
irq_chip methods to be invoked from hardirq context, even on a
real-time kernel. Because the spinlock_t type becomes a "sleeping"
spinlock with RT kernels, it is not suitable to be used with irq_chips.
A quick audit of the operations under the lock reveals that they do only
minimal, bounded work, and are therefore safe to do under a raw spinlock.
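A condensed kernel-style fragment of the resulting pattern (not a stand-alone program;
gpio_rmw_bit() is a made-up helper and the register offsets are abbreviated): the lock is
declared raw_spinlock_t so it stays a busy-waiting lock even on PREEMPT_RT, and each
callback only does a short read-modify-write under it, which is why the conversion is safe.

    struct amd_gpio_sketch {
        raw_spinlock_t lock;    /* never becomes a sleeping lock on RT */
        void __iomem *base;
    };

    /* Hypothetical helper showing the shape of every converted callback. */
    static void gpio_rmw_bit(struct amd_gpio_sketch *g, unsigned int off, u32 bit)
    {
        unsigned long flags;
        u32 reg;

        raw_spin_lock_irqsave(&g->lock, flags);   /* legal in hardirq context */
        reg = readl(g->base + off);
        writel(reg | bit, g->base + off);
        raw_spin_unlock_irqrestore(&g->lock, flags);
    }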
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
(cherry picked from commit 229710fecdd805abb753c480778ea0de47cbb1e2)
Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
drivers/pinctrl/pinctrl-amd.h | 2 +-
drivers/pinctrl/pinctrl-amd.c | 66 +++++++++++++++++++++----------------------
2 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 7bfea47dbb47..4d5d5cac48a9 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -86,7 +86,7 @@ struct amd_function {
};
struct amd_gpio {
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *base;
const struct amd_pingroup *groups;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 537b52055756..cfcf9db02c7d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -41,11 +41,11 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
u32 pin_reg;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return 0;
}
@@ -57,7 +57,7 @@ static int amd_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
unsigned long flags;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
pin_reg |= BIT(OUTPUT_ENABLE_OFF);
if (value)
@@ -65,7 +65,7 @@ static int amd_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
else
pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return 0;
}
@@ -76,9 +76,9 @@ static int amd_gpio_get_value(struct gpio_chip *gc, unsigned offset)
unsigned long flags;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return !!(pin_reg & BIT(PIN_STS_OFF));
}
@@ -89,14 +89,14 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
unsigned long flags;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
if (value)
pin_reg |= BIT(OUTPUT_VALUE_OFF);
else
pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
@@ -108,7 +108,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
unsigned long flags;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
if (debounce) {
@@ -159,7 +159,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg &= ~DB_CNTRl_MASK;
}
writel(pin_reg, gpio_dev->base + offset * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return ret;
}
@@ -208,9 +208,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
for (; i < pin_num; i++) {
seq_printf(s, "pin%d\t", i);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
interrupt_enable = "interrupt is enabled|";
@@ -315,12 +315,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static void amd_gpio_irq_disable(struct irq_data *d)
@@ -330,12 +330,12 @@ static void amd_gpio_irq_disable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg &= ~BIT(INTERRUPT_ENABLE_OFF);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static void amd_gpio_irq_mask(struct irq_data *d)
@@ -345,11 +345,11 @@ static void amd_gpio_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static void amd_gpio_irq_unmask(struct irq_data *d)
@@ -359,11 +359,11 @@ static void amd_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static void amd_gpio_irq_eoi(struct irq_data *d)
@@ -373,11 +373,11 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
reg |= EOI_MASK;
writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -388,7 +388,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
/* Ignore the settings coming from the client and
@@ -453,7 +453,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return ret;
}
@@ -494,14 +494,14 @@ static void amd_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
/*enable GPIO interrupt again*/
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
reg64 = reg;
reg64 = reg64 << 32;
reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
reg64 |= reg;
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
/*
* first 46 bits indicates interrupt status.
@@ -529,11 +529,11 @@ static void amd_gpio_irq_handler(struct irq_desc *desc)
if (handled == 0)
handle_bad_irq(desc);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
reg |= EOI_MASK;
writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
chained_irq_exit(chip, desc);
}
@@ -585,9 +585,9 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + pin*4);
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
switch (param) {
case PIN_CONFIG_INPUT_DEBOUNCE:
arg = pin_reg & DB_TMR_OUT_MASK;
@@ -627,7 +627,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param;
struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
- spin_lock_irqsave(&gpio_dev->lock, flags);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -666,7 +666,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
writel(pin_reg, gpio_dev->base + pin*4);
}
- spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return ret;
}
@@ -734,7 +734,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
if (!gpio_dev)
return -ENOMEM;
- spin_lock_init(&gpio_dev->lock);
+ raw_spin_lock_init(&gpio_dev->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
--
2.11.0

View File

@ -1,165 +0,0 @@
From c075f8efc5513551233dabce366eed01e32b62a4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Jun 2017 13:12:18 +0800
Subject: [PATCH] pinctrl/amd: Use regular interrupt instead of chained
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
BugLink: https://bugs.launchpad.net/bugs/1671360
The AMD pinctrl driver uses a chained interrupt to demultiplex the GPIO
interrupts. Kevin Vandeventer reported that his new AMD Ryzen locks up
hard on boot when the AMD pinctrl driver is initialized. The reason is an
interrupt storm. It's not clear whether that's caused by hardware or
firmware or both.
Using chained interrupts on X86 is a dangerous endeavour. If a system is
misconfigured or the hardware is buggy, there is no safety net to catch an
interrupt storm.
Convert the driver to use a regular interrupt for the demultiplex
handler. This allows the interrupt storm detector to catch the malfunction
and lets the system boot up.
This should be backported to stable because it's likely that more users
will run into this problem as AMD Ryzen machines spread.
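The mechanism that makes this safe is the handler's return value: unlike a chained flow
handler, a regular handler registered with devm_request_irq() can return IRQ_NONE, which
feeds the core's spurious-interrupt accounting and lets it disable a storming line. A
condensed kernel-style skeleton (not stand-alone; the body is abbreviated to comments):

    static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
    {
        struct amd_gpio *gpio_dev = dev_id;
        irqreturn_t ret = IRQ_NONE;    /* nothing handled yet */

        /* read WAKE_INT_STATUS_REG0/1, dispatch and ack each pending pin,
         * setting ret = IRQ_HANDLED for every pin that was really pending */

        return ret;    /* IRQ_NONE lets the storm detector step in */
    }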
Reported-by: Kevin Vandeventer
Link: https://bugzilla.suse.com/show_bug.cgi?id=1034261
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
(backported from commit babdc22b0ccf4ef5a3075ce6e4afc26b7a279faf linux-next)
Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
drivers/pinctrl/pinctrl-amd.c | 90 +++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 50 deletions(-)
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index cfcf9db02c7d..07a88b83e02b 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -478,64 +478,54 @@ static struct irq_chip amd_gpio_irqchip = {
.irq_set_type = amd_gpio_irq_set_type,
};
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
{
- u32 i;
- u32 off;
- u32 reg;
- u32 pin_reg;
- u64 reg64;
- int handled = 0;
- unsigned int irq;
+ struct amd_gpio *gpio_dev = dev_id;
+ struct gpio_chip *gc = &gpio_dev->gc;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i, irqnr;
unsigned long flags;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ u32 *regs, regval;
+ u64 status, mask;
- chained_irq_enter(chip, desc);
- /*enable GPIO interrupt again*/
+ /* Read the wake status */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
- reg64 = reg;
- reg64 = reg64 << 32;
-
- reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
- reg64 |= reg;
+ status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+ status <<= 32;
+ status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- /*
- * first 46 bits indicates interrupt status.
- * one bit represents four interrupt sources.
- */
- for (off = 0; off < 46 ; off++) {
- if (reg64 & BIT(off)) {
- for (i = 0; i < 4; i++) {
- pin_reg = readl(gpio_dev->base +
- (off * 4 + i) * 4);
- if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
- (pin_reg & BIT(WAKE_STS_OFF))) {
- irq = irq_find_mapping(gc->irqdomain,
- off * 4 + i);
- generic_handle_irq(irq);
- writel(pin_reg,
- gpio_dev->base
- + (off * 4 + i) * 4);
- handled++;
- }
- }
+ /* Bit 0-45 contain the relevant status bits */
+ status &= (1ULL << 46) - 1;
+ regs = gpio_dev->base;
+ for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+ if (!(status & mask))
+ continue;
+ status &= ~mask;
+
+ /* Each status bit covers four pins */
+ for (i = 0; i < 4; i++) {
+ regval = readl(regs + i);
+ if (!(regval & PIN_IRQ_PENDING))
+ continue;
+ irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+ generic_handle_irq(irq);
+ /* Clear interrupt */
+ writel(regval, regs + i);
+ ret = IRQ_HANDLED;
}
}
- if (handled == 0)
- handle_bad_irq(desc);
-
+ /* Signal EOI to the GPIO unit */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
- reg |= EOI_MASK;
- writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+ regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ regval |= EOI_MASK;
+ writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- chained_irq_exit(chip, desc);
+ return ret;
}
static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -803,10 +793,10 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- gpiochip_set_chained_irqchip(&gpio_dev->gc,
- &amd_gpio_irqchip,
- irq_base,
- amd_gpio_irq_handler);
+ ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+ KBUILD_MODNAME, gpio_dev);
+ if (ret)
+ goto out2;
platform_set_drvdata(pdev, gpio_dev);
--
2.11.0