From 2686d514c345243e06dba40298a13c7803400a61 Mon Sep 17 00:00:00 2001
From: Qi Zheng
Date: Wed, 4 Dec 2024 19:09:48 +0800
Subject: [PATCH] mm: make zap_pte_range() handle full within-PMD range

In preparation for reclaiming empty PTE pages, this commit first makes
zap_pte_range() handle the full within-PMD range, so that we can more
easily detect and free PTE pages in this function in subsequent commits.

Link: https://lkml.kernel.org/r/76c95ee641da7808cd66d642ab95841df4048295.1733305182.git.zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng
Reviewed-by: Jann Horn
Cc: Andy Lutomirski
Cc: Catalin Marinas
Cc: Dave Hansen
Cc: David Hildenbrand
Cc: David Rientjes
Cc: Hugh Dickins
Cc: Lorenzo Stoakes
Cc: Matthew Wilcox
Cc: Mel Gorman
Cc: Muchun Song
Cc: Peter Xu
Cc: Peter Zijlstra
Cc: Will Deacon
Cc: Zach O'Keefe
Cc: Dan Carpenter
Signed-off-by: Andrew Morton
---
 mm/memory.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/memory.c b/mm/memory.c
index 694156ecaf95..d4d5bd7046e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1718,6 +1718,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *pte;
 	int nr;
 
+retry:
 	tlb_change_page_size(tlb, PAGE_SIZE);
 	init_rss_vec(rss);
 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1757,6 +1758,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	if (force_flush)
 		tlb_flush_mmu(tlb);
 
+	if (addr != end) {
+		cond_resched();
+		force_flush = false;
+		force_break = false;
+		goto retry;
+	}
+
 	return addr;
 }
-- 
2.25.1
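
For readers outside the kernel tree, below is a minimal userspace C sketch of
the control-flow pattern this patch introduces: zap one batch of entries at a
time, flush when forced to, then clear the force flags and retry from where
the previous pass stopped until the whole within-PMD range is covered. All
names here (zap_range, process_batch, BATCH) are hypothetical stand-ins for
illustration, not kernel API.

/*
 * Userspace analogue of the retry loop added to zap_pte_range().
 * process_batch() stands in for one pte_offset_map_lock()..unlock
 * section; a full batch simulates a condition that forces a flush
 * (force_flush/force_break in the real code).
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH 4

static unsigned long process_batch(unsigned long addr, unsigned long end,
				   bool *force_flush)
{
	unsigned long n = 0;

	while (addr < end && n < BATCH) {
		addr++;		/* "zap one entry" */
		n++;
	}
	/* Filling a whole batch forces a flush before continuing. */
	*force_flush = (n == BATCH);
	return addr;
}

static unsigned long zap_range(unsigned long addr, unsigned long end)
{
	bool force_flush;

retry:
	force_flush = false;
	addr = process_batch(addr, end, &force_flush);

	if (force_flush)
		printf("flush up to %lu\n", addr);	/* tlb_flush_mmu() */

	if (addr != end) {
		/* cond_resched() would go here in the kernel. */
		goto retry;
	}
	return addr;
}

int main(void)
{
	zap_range(0, 10);	/* flushes at 4 and 8, finishes at 10 */
	return 0;
}

The point of the pattern is that the retry now happens inside the zapping
function itself (via the retry label), so it returns only once addr == end,
with cond_resched() between passes keeping scheduling latency bounded.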