mm: factor out the numa mapping rebuilding into a new helper
author	Baolin Wang <baolin.wang@linux.alibaba.com>
	Fri, 29 Mar 2024 06:56:45 +0000 (14:56 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 26 Apr 2024 03:56:30 +0000 (20:56 -0700)
Patch series "support multi-size THP numa balancing", v2.

This patchset adds NUMA balancing support for multi-size THP (mTHP).  As a
simple solution to start with, the NUMA balancing algorithm for mTHP
follows the existing THP strategy.  Please find the details in each patch.

This patch (of 2):

To support numa balancing of large folios, factor out the numa mapping
rebuilding into a new helper as preparation.

Link: https://lkml.kernel.org/r/cover.1712132950.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/cover.1711683069.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/8bc2586bdd8dbbe6d83c09b77b360ec8fcac3736.1711683069.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 93eaeaf5ca9dda103b2bb1dfecb17c6925032900..77c69bf297eea47690e2214c89d58b63ca39a56d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5063,6 +5063,20 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
        return mpol_misplaced(folio, vmf, addr);
 }
 
+static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
+                                       bool writable)
+{
+       pte_t pte, old_pte;
+
+       old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+       pte = pte_modify(old_pte, vma->vm_page_prot);
+       pte = pte_mkyoung(pte);
+       if (writable)
+               pte = pte_mkwrite(pte, vma);
+       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+}
+
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
@@ -5168,13 +5182,7 @@ out_map:
         * Make it present again, depending on how arch implements
         * non-accessible ptes, some can allow access by kernel mode.
         */
-       old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
-       pte = pte_modify(old_pte, vma->vm_page_prot);
-       pte = pte_mkyoung(pte);
-       if (writable)
-               pte = pte_mkwrite(pte, vma);
-       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+       numa_rebuild_single_mapping(vmf, vma, writable);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        goto out;
 }
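
For illustration of where the series is headed (this is a hypothetical
sketch, not code from this patch): once the single-PTE rebuild is a
self-contained helper, a large-folio path can apply the same steps to every
PTE that maps the folio.  The numa_rebuild_large_mapping() name and its
signature below are assumptions made for the sketch, and edge cases such as
a folio straddling the VMA bounds or being only partially mapped are
ignored:

	/*
	 * Hypothetical sketch only -- not part of this patch.  Rebuild the
	 * mappings for every PTE covering a large folio by repeating the
	 * same steps as numa_rebuild_single_mapping() for each entry.
	 */
	static void numa_rebuild_large_mapping(struct vm_fault *vmf,
					       struct vm_area_struct *vma,
					       struct folio *folio, bool writable)
	{
		int nr = folio_nr_pages(folio);
		unsigned long start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
		unsigned long addr;
		pte_t *ptep;

		/* Point ptep at the first PTE of the folio, then walk all of them. */
		ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE;
		for (addr = start; addr < start + nr * PAGE_SIZE;
		     addr += PAGE_SIZE, ptep++) {
			pte_t old_pte, pte;

			old_pte = ptep_modify_prot_start(vma, addr, ptep);
			pte = pte_modify(old_pte, vma->vm_page_prot);
			pte = pte_mkyoung(pte);
			if (writable)
				pte = pte_mkwrite(pte, vma);
			ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
			update_mmu_cache_range(vmf, vma, addr, ptep, 1);
		}
	}

The point of the refactoring is exactly this reuse: the rebuild steps are
written once, and the per-folio-size policy (single PTE vs. a range of
PTEs) stays in the callers.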