mm: move mm counter updating out of set_pte_range()
author  Kefeng Wang <wangkefeng.wang@huawei.com>
Fri, 12 Apr 2024 06:47:50 +0000 (14:47 +0800)
committer       Andrew Morton <akpm@linux-foundation.org>
Mon, 6 May 2024 00:53:36 +0000 (17:53 -0700)
Patch series "mm: batch mm counter updating in filemap_map_pages()", v3.

Let's batch mm counter updating to accelerate filemap_map_pages().

This patch (of 2):

In order to support batch mm counter updating in filemap_map_pages(), move
the mm counter updating out of set_pte_range().  The folios mapped in
filemap_map_pages() always come from the page cache, so its callers update
the file counter directly; the other caller, finish_fault(), distinguishes
anonymous (COW) folios from file folios via vmf->flags and vma->vm_flags
and picks the counter type accordingly.
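
As a rough illustration of where this series is heading, here is a minimal
sketch of the batching that the follow-up patch enables (not the actual
patch 2 code; the locals rss and folio_type are illustrative only).  Since
every folio mapped here is file-backed, the counter type can be captured
once and the counter updated a single time per fault:

    int folio_type = mm_counter_file(folio);  /* all folios here are file-backed */
    unsigned long rss = 0;

    /* in the mapping loop, instead of one add_mm_counter() per range: */
    set_pte_range(vmf, folio, page, count, addr);
    folio_ref_add(folio, count);
    rss += count;

    /* after the loop, fold the accumulated count in with one update: */
    if (rss)
        add_mm_counter(vmf->vma->vm_mm, folio_type, rss);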

Link: https://lkml.kernel.org/r/20240412064751.119015-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240412064751.119015-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c
mm/memory.c

diff --git a/mm/filemap.c b/mm/filemap.c
index 21e70434d931889be640ff97ded097247d332482..5a518c5075fb33f3240412e29eeb4392db4dc879 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3539,6 +3539,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
+                       add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
+                                      count);
                        folio_ref_add(folio, count);
                        if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
@@ -3553,6 +3555,7 @@ skip:
 
        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
+               add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
                folio_ref_add(folio, count);
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
@@ -3589,6 +3592,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
                ret = VM_FAULT_NOPAGE;
 
        set_pte_range(vmf, folio, page, 1, addr);
+       add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
        folio_ref_inc(folio);
 
        return ret;
diff --git a/mm/memory.c b/mm/memory.c
index b71d011105962298efaaa9d00142b0158cc60a8e..33d87b64d15db98fff7af64f83258445aef75249 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4687,12 +4687,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
                entry = pte_mkuffd_wp(entry);
        /* copy-on-write page */
        if (write && !(vma->vm_flags & VM_SHARED)) {
-               add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
                VM_BUG_ON_FOLIO(nr != 1, folio);
                folio_add_new_anon_rmap(folio, vma, addr);
                folio_add_lru_vma(folio, vma);
        } else {
-               add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
                folio_add_file_rmap_ptes(folio, page, nr, vma);
        }
        set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4729,9 +4727,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        vm_fault_t ret;
+       bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+                     !(vma->vm_flags & VM_SHARED);
 
        /* Did we COW the page? */
-       if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+       if (is_cow)
                page = vmf->cow_page;
        else
                page = vmf->page;
@@ -4767,8 +4767,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
        /* Re-check under ptl */
        if (likely(!vmf_pte_changed(vmf))) {
                struct folio *folio = page_folio(page);
+               int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
 
                set_pte_range(vmf, folio, page, 1, vmf->address);
+               add_mm_counter(vma->vm_mm, type, 1);
                ret = 0;
        } else {
                update_mmu_tlb(vma, vmf->address, vmf->pte);