diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 731642e0f5a0fb42c13873bf3abfc21cedb67dd9..9442631fd4afcb16e3a09d6a661ebdc630fcbe87 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
+#include <linux/pagewalk.h>
 #include <linux/vmacache.h>
 #include <linux/hugetlb.h>
 #include <linux/huge_mm.h>
@@ -417,6 +417,7 @@ struct mem_size_stats {
        unsigned long lazyfree;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
+       unsigned long file_thp;
        unsigned long swap;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
@@ -461,7 +462,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked)
 {
-       int i, nr = compound ? 1 << compound_order(page) : 1;
+       int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
        /*
@@ -513,7 +514,9 @@ static int smaps_pte_hole(unsigned long addr, unsigned long end,
 
        return 0;
 }
-#endif
+#else
+#define smaps_pte_hole         NULL
+#endif /* CONFIG_SHMEM */
 
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
@@ -586,7 +589,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        else if (is_zone_device_page(page))
                /* pass */;
        else
-               VM_BUG_ON_PAGE(1, page);
+               mss->file_thp += HPAGE_PMD_SIZE;
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
@@ -729,21 +732,24 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
        }
        return 0;
 }
+#else
+#define smaps_hugetlb_range    NULL
 #endif /* HUGETLB_PAGE */
 
+static const struct mm_walk_ops smaps_walk_ops = {
+       .pmd_entry              = smaps_pte_range,
+       .hugetlb_entry          = smaps_hugetlb_range,
+};
+
+static const struct mm_walk_ops smaps_shmem_walk_ops = {
+       .pmd_entry              = smaps_pte_range,
+       .hugetlb_entry          = smaps_hugetlb_range,
+       .pte_hole               = smaps_pte_hole,
+};
+
 static void smap_gather_stats(struct vm_area_struct *vma,
                             struct mem_size_stats *mss)
 {
-       struct mm_walk smaps_walk = {
-               .pmd_entry = smaps_pte_range,
-#ifdef CONFIG_HUGETLB_PAGE
-               .hugetlb_entry = smaps_hugetlb_range,
-#endif
-               .mm = vma->vm_mm,
-       };
-
-       smaps_walk.private = mss;
-
 #ifdef CONFIG_SHMEM
        /* In case of smaps_rollup, reset the value from previous vma */
        mss->check_shmem_swap = false;
@@ -765,12 +771,13 @@ static void smap_gather_stats(struct vm_area_struct *vma,
                        mss->swap += shmem_swapped;
                } else {
                        mss->check_shmem_swap = true;
-                       smaps_walk.pte_hole = smaps_pte_hole;
+                       walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
+                       return;
                }
        }
 #endif
        /* mmap_sem is held in m_start */
-       walk_page_vma(vma, &smaps_walk);
+       walk_page_vma(vma, &smaps_walk_ops, mss);
 }
 
 #define SEQ_PUT_DEC(str, val) \
@@ -803,6 +810,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
        SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
        SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
        SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
+       SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
        SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
        seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
                                  mss->private_hugetlb >> 10, 7);
@@ -1118,6 +1126,11 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
        return 0;
 }
 
+static const struct mm_walk_ops clear_refs_walk_ops = {
+       .pmd_entry              = clear_refs_pte_range,
+       .test_walk              = clear_refs_test_walk,
+};
+
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
 {
@@ -1151,12 +1164,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                struct clear_refs_private cp = {
                        .type = type,
                };
-               struct mm_walk clear_refs_walk = {
-                       .pmd_entry = clear_refs_pte_range,
-                       .test_walk = clear_refs_test_walk,
-                       .mm = mm,
-                       .private = &cp,
-               };
 
                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
                        if (down_write_killable(&mm->mmap_sem)) {
@@ -1217,7 +1224,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                                0, NULL, mm, 0, -1UL);
                        mmu_notifier_invalidate_range_start(&range);
                }
-               walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
+               walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+                               &cp);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
@@ -1489,8 +1497,16 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 
        return err;
 }
+#else
+#define pagemap_hugetlb_range  NULL
 #endif /* HUGETLB_PAGE */
 
+static const struct mm_walk_ops pagemap_ops = {
+       .pmd_entry      = pagemap_pmd_range,
+       .pte_hole       = pagemap_pte_hole,
+       .hugetlb_entry  = pagemap_hugetlb_range,
+};
+
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
  *
@@ -1522,7 +1538,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 {
        struct mm_struct *mm = file->private_data;
        struct pagemapread pm;
-       struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
@@ -1550,14 +1565,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!pm.buffer)
                goto out_mm;
 
-       pagemap_walk.pmd_entry = pagemap_pmd_range;
-       pagemap_walk.pte_hole = pagemap_pte_hole;
-#ifdef CONFIG_HUGETLB_PAGE
-       pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
-#endif
-       pagemap_walk.mm = mm;
-       pagemap_walk.private = &pm;
-
        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
@@ -1586,7 +1593,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                ret = down_read_killable(&mm->mmap_sem);
                if (ret)
                        goto out_free;
-               ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+               ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
                up_read(&mm->mmap_sem);
                start_vaddr = end;
 
@@ -1798,6 +1805,11 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 }
 #endif
 
+static const struct mm_walk_ops show_numa_ops = {
+       .hugetlb_entry = gather_hugetlb_stats,
+       .pmd_entry = gather_pte_stats,
+};
+
 /*
  * Display pages allocated per node and memory policy via /proc.
  */
@@ -1809,12 +1821,6 @@ static int show_numa_map(struct seq_file *m, void *v)
        struct numa_maps *md = &numa_priv->md;
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
-       struct mm_walk walk = {
-               .hugetlb_entry = gather_hugetlb_stats,
-               .pmd_entry = gather_pte_stats,
-               .private = md,
-               .mm = mm,
-       };
        struct mempolicy *pol;
        char buffer[64];
        int nid;
@@ -1848,7 +1854,7 @@ static int show_numa_map(struct seq_file *m, void *v)
                seq_puts(m, " huge");
 
        /* mmap_sem is held by m_start */
-       walk_page_vma(vma, &walk);
+       walk_page_vma(vma, &show_numa_ops, md);
 
        if (!md->pages)
                goto out;
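
Every call site converted above follows the same pattern from the new <linux/pagewalk.h> interface: the callbacks live in a static const struct mm_walk_ops, and the mm and private pointers are passed to walk_page_range()/walk_page_vma() at the call instead of being packed into an on-stack struct mm_walk. A minimal sketch of that pattern follows; the count_present*/present_count names and the walker itself are invented for illustration, only the mm_walk_ops fields and the walk_page_range() signature are taken from this change.

#include <linux/mm.h>
#include <linux/pagewalk.h>

struct present_count {
	unsigned long pages;
};

/* Called for each PTE in the range; walk->private is the caller's cookie. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	struct present_count *pc = walk->private;

	if (pte_present(*pte))
		pc->pages++;
	return 0;
}

/*
 * Function pointers are now separate from the per-walk state, so they can
 * be static const, as in the smaps/clear_refs/pagemap/numa_maps conversions.
 */
static const struct mm_walk_ops count_present_ops = {
	.pte_entry	= count_present_pte,
};

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct present_count pc = { 0 };

	/* walk_page_range() expects mmap_sem to be held, as in pagemap_read(). */
	down_read(&mm->mmap_sem);
	walk_page_range(mm, start, end, &count_present_ops, &pc);
	up_read(&mm->mmap_sem);

	return pc.pages;
}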