2b834b083d
Patches and rationale by Seth Forshee[1]:

  My testing shows that the "POWER9: Additional power9 patches" patches are
  responsible, two of them in particular:

    - mm: introduce page_vma_mapped_walk()
    - mm, ksm: convert write_protect_page() to use page_vma_mapped_walk()

  These patches don't appear to be included for any functionality they
  provide, but rather to make "mm/ksm: handle protnone saved writes when
  making page write protect" a clean cherry pick instead of a backport.
  But the backport isn't that difficult, so as far as I can tell we can
  do away with the other two patches.

1: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1674838/comments/108
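For context, the backport that the two dropped patches were meant to avoid only
has to apply the same protnone/saved-write logic to the older
write_protect_page(), which (as in v4.10) takes its pte from
page_check_address() rather than from a page_vma_mapped_walk() state. The
sketch below is an illustration of that idea, not the literal hunk from the
series; the locals ptep, addr and entry follow the older function:

	/*
	 * Sketch only: protnone saved-write handling applied to the
	 * pre-page_vma_mapped_walk() write_protect_page(). "ptep" and
	 * "addr" are the locals of that older function.
	 */
	if (pte_write(*ptep) || pte_dirty(*ptep) ||
	    (pte_protnone(*ptep) && pte_savedwrite(*ptep))) {
		pte_t entry;

		/*
		 * ... unchanged: entry = ptep_clear_flush_notify(),
		 * swap-count check, set_page_dirty() ...
		 */

		if (pte_protnone(entry))
			entry = pte_mkclean(pte_clear_savedwrite(entry));
		else
			entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}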
From b4cf3c107f8f1d2da2b606e9d08e241be7000d65 Mon Sep 17 00:00:00 2001
From: Seth Forshee <seth.forshee@canonical.com>
Date: Wed, 3 May 2017 08:34:47 -0500
Subject: [PATCH 1/4] Revert "mm/ksm: handle protnone saved writes when making
 page write protect"

This reverts commit c228a1037cd6bd0064472ea282e3730a342d6fca.

Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
---
 include/asm-generic/pgtable.h | 8 --------
 mm/ksm.c                      | 9 ++-------
 2 files changed, 2 insertions(+), 15 deletions(-)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8c8ba48bef0b..b6f3a8a4b738 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -200,10 +200,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_mk_savedwrite pte_mkwrite
 #endif
 
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
 #ifndef pmd_savedwrite
 #define pmd_savedwrite pmd_write
 #endif
@@ -212,10 +208,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pmd_mk_savedwrite pmd_mkwrite
 #endif
 
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
diff --git a/mm/ksm.c b/mm/ksm.c
index abc05187168a..9dd2e58fb6dc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,8 +880,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
@@ -906,11 +905,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-
-		if (pte_protnone(entry))
-			entry = pte_mkclean(pte_clear_savedwrite(entry));
-		else
-			entry = pte_mkclean(pte_wrprotect(entry));
+		entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = *pvmw.pte;
-- 
2.7.4