mm: update ptep_modify_prot_commit to take old pte value as arg
author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
           Tue, 5 Mar 2019 23:46:29 +0000 (15:46 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 6 Mar 2019 05:07:18 +0000 (21:07 -0800)
Architectures like ppc64 need to do a conditional TLB flush based on
the old and new values of the pte.  Enable that by passing the old pte
value as an argument.
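
To illustrate why the old value is useful, here is a minimal sketch of
an architecture-side commit hook that flushes only on a transition that
removes permissions.  The flush condition is an assumption chosen for
illustration; it is not the actual ppc64 implementation.

/*
 * Illustrative only -- not the real ppc64 code.  With the old pte
 * value available, an architecture can skip the TLB flush when the
 * update only adds permissions and flush only when rights are removed.
 */
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * Hypothetical condition: flush only if the new pte drops
	 * write permission that the old pte had.
	 */
	if (pte_write(old_pte) && !pte_write(pte))
		flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}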

Link: http://lkml.kernel.org/r/20190116085035.29729-3-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/s390/include/asm/pgtable.h
arch/s390/mm/pgtable.c
arch/x86/include/asm/paravirt.h
fs/proc/task_mmu.c
include/asm-generic/pgtable.h
mm/memory.c
mm/mprotect.c

index 5d730199e37b6f1a6943d38cc957665822ee0b09..76dc344edb8cfd9c661b456d8c19afa54d0fa530 100644 (file)
@@ -1070,7 +1070,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
-void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, pte_t *, pte_t);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+                            pte_t *, pte_t, pte_t);
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
index 71aa011707687b3d463882811db1b06fff0e5553..8485d6dc275496ab69d76cfa29bb2ae66c14e9b5 100644 (file)
@@ -321,7 +321,7 @@ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 }
 
 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t *ptep, pte_t pte)
+                            pte_t *ptep, pte_t old_pte, pte_t pte)
 {
        pgste_t pgste;
        struct mm_struct *mm = vma->vm_mm;
index c5a7f18cce7eb196f06ecbe3ad56c57b1ab2015a..c25c38a05c1c944f7e7c7c51509bee0499705573 100644 (file)
@@ -433,7 +433,7 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned
 }
 
 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
-                                          pte_t *ptep, pte_t pte)
+                                          pte_t *ptep, pte_t old_pte, pte_t pte)
 {
 
        if (sizeof(pteval_t) > sizeof(long))
index 9c2ef731dd5fed2cb9d6c6317589d381e696cedf..beccb0b1d57c77ca3462513ef9912822ff7740bc 100644 (file)
@@ -948,10 +948,12 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
        pte_t ptent = *pte;
 
        if (pte_present(ptent)) {
-               ptent = ptep_modify_prot_start(vma, addr, pte);
-               ptent = pte_wrprotect(ptent);
+               pte_t old_pte;
+
+               old_pte = ptep_modify_prot_start(vma, addr, pte);
+               ptent = pte_wrprotect(old_pte);
                ptent = pte_clear_soft_dirty(ptent);
-               ptep_modify_prot_commit(vma, addr, pte, ptent);
+               ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
                set_pte_at(vma->vm_mm, addr, pte, ptent);
index 8b0e933efe2634989cb6e4322b5dc2347162a75c..fa782fba51eebac4a3776ed1c9565abbeeb309c6 100644 (file)
@@ -657,7 +657,7 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
  */
 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
                                           unsigned long addr,
-                                          pte_t *ptep, pte_t pte)
+                                          pte_t *ptep, pte_t old_pte, pte_t pte)
 {
        __ptep_modify_prot_commit(vma, addr, ptep, pte);
 }
index 5ade52502ea03a978bd5b7ee637b608164a08b66..557c6fffedd188ec7f54394167a8e5a712726872 100644 (file)
@@ -3599,7 +3599,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
        int last_cpupid;
        int target_nid;
        bool migrated = false;
-       pte_t pte;
+       pte_t pte, old_pte;
        bool was_writable = pte_savedwrite(vmf->orig_pte);
        int flags = 0;
 
@@ -3619,12 +3619,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
         * Make it present again, Depending on how arch implementes non
         * accessible ptes, some can allow access by kernel mode.
         */
-       pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
-       pte = pte_modify(pte, vma->vm_page_prot);
+       old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+       pte = pte_modify(old_pte, vma->vm_page_prot);
        pte = pte_mkyoung(pte);
        if (was_writable)
                pte = pte_mkwrite(pte);
-       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, pte);
+       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
        update_mmu_cache(vma, vmf->address, vmf->pte);
 
        page = vm_normal_page(vma, vmf->address, pte);
index c89ce07923c8346bb0c04d260022680b763a7725..028c724dcb1ae47337127517d9f3181caa8f4728 100644 (file)
@@ -110,8 +110,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                        continue;
                        }
 
-                       ptent = ptep_modify_prot_start(vma, addr, pte);
-                       ptent = pte_modify(ptent, newprot);
+                       oldpte = ptep_modify_prot_start(vma, addr, pte);
+                       ptent = pte_modify(oldpte, newprot);
                        if (preserve_write)
                                ptent = pte_mk_savedwrite(ptent);
 
@@ -121,7 +121,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
-                       ptep_modify_prot_commit(vma, addr, pte, ptent);
+                       ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
                        pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);