mm: mprotect: use a folio in change_pte_range()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 18 Oct 2023 14:07:56 +0000 (22:07 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 25 Oct 2023 23:47:12 +0000 (16:47 -0700)
Use a folio in change_pte_range() to save three compound_head() calls.
Since NUMA balancing currently handles only normal and PMD-mapped pages,
it is enough to update only the entire folio's access time.

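For context, the savings come from the page-flag helpers themselves: helpers
such as PageKsm(), page_is_file_lru() and PageDirty() resolve the head page
internally (via compound_head()/page_folio()), while the folio variants start
from the head page and skip that step. A minimal sketch of the pattern; the
helper names skip_prot_numa_old()/skip_prot_numa_new() are made up for
illustration and are not kernel source:

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/page-flags.h>

/* Old style: each page-based test resolves the head page again. */
static bool skip_prot_numa_old(struct vm_area_struct *vma,
			       unsigned long addr, pte_t oldpte)
{
	struct page *page = vm_normal_page(vma, addr, oldpte);

	if (!page || is_zone_device_page(page) || PageKsm(page))
		return true;
	/* page_is_file_lru() and PageDirty() also look up the head page. */
	return page_is_file_lru(page) && PageDirty(page);
}

/* Folio style: a folio always refers to the head page, so the
 * repeated lookups drop out. */
static bool skip_prot_numa_new(struct vm_area_struct *vma,
			       unsigned long addr, pte_t oldpte)
{
	struct folio *folio = vm_normal_folio(vma, addr, oldpte);

	if (!folio || folio_is_zone_device(folio) || folio_test_ksm(folio))
		return true;
	return folio_is_file_lru(folio) && folio_test_dirty(folio);
}
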
Link: https://lkml.kernel.org/r/20231018140806.2783514-10-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mprotect.c

index f1dc8f8c84ef3fe3906ef36e1ee842d0d6b4311b..81991102f7859e94cacd9670d42891038e92b214 100644
@@ -114,7 +114,7 @@ static long change_pte_range(struct mmu_gather *tlb,
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa) {
-                               struct page *page;
+                               struct folio *folio;
                                int nid;
                                bool toptier;
 
@@ -122,13 +122,14 @@ static long change_pte_range(struct mmu_gather *tlb,
                                if (pte_protnone(oldpte))
                                        continue;
 
-                               page = vm_normal_page(vma, addr, oldpte);
-                               if (!page || is_zone_device_page(page) || PageKsm(page))
+                               folio = vm_normal_folio(vma, addr, oldpte);
+                               if (!folio || folio_is_zone_device(folio) ||
+                                   folio_test_ksm(folio))
                                        continue;
 
                                /* Also skip shared copy-on-write pages */
                                if (is_cow_mapping(vma->vm_flags) &&
-                                   page_count(page) != 1)
+                                   folio_ref_count(folio) != 1)
                                        continue;
 
                                /*
@@ -136,14 +137,15 @@ static long change_pte_range(struct mmu_gather *tlb,
                                 * it cannot move them all from MIGRATE_ASYNC
                                 * context.
                                 */
-                               if (page_is_file_lru(page) && PageDirty(page))
+                               if (folio_is_file_lru(folio) &&
+                                   folio_test_dirty(folio))
                                        continue;
 
                                /*
                                 * Don't mess with PTEs if page is already on the node
                                 * a single-threaded process is running on.
                                 */
-                               nid = page_to_nid(page);
+                               nid = folio_nid(folio);
                                if (target_node == nid)
                                        continue;
                                toptier = node_is_toptier(nid);
@@ -157,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb,
                                        continue;
                                if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
                                    !toptier)
-                                       xchg_page_access_time(page,
+                                       folio_xchg_access_time(folio,
                                                jiffies_to_msecs(jiffies));
                        }
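
For reference, the page-to-folio replacements in this patch are:
vm_normal_page() -> vm_normal_folio(), is_zone_device_page() ->
folio_is_zone_device(), PageKsm() -> folio_test_ksm(), page_count() ->
folio_ref_count(), page_is_file_lru() -> folio_is_file_lru(), PageDirty() ->
folio_test_dirty(), page_to_nid() -> folio_nid(), and
xchg_page_access_time() -> folio_xchg_access_time(). Each check is
behavior-preserving, since a compound page's reference count and these flags
are kept on the head page, which is exactly what the folio handle refers to.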