mm/madvise: batch tlb flushes for MADV_DONTNEED[_LOCKED]
author    SeongJae Park <sj@kernel.org>
          Thu, 10 Apr 2025 00:00:22 +0000 (17:00 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 12 May 2025 00:48:27 +0000 (17:48 -0700)
MADV_DONTNEED[_LOCKED] handling for [process_]madvise() flushes the tlb
for each vma of each address range.  Update the logic to do tlb flushes
in a batched way.  Initialize an mmu_gather object from do_madvise() and
vector_madvise(), which are the entry level functions for
[process_]madvise(), respectively, and pass those objects to the
function for per-vma work via the madvise_behavior struct.  Make the
per-vma logic not flush the tlb on its own, but only save the tlb
entries to the received mmu_gather object.  For this internal logic
change, make zap_page_range_single_batched() non-static and use it
directly from madvise_dontneed_single_vma().  Finally, the entry level
functions flush the tlb entries gathered for the entire user request at
once.
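
For illustration, a minimal sketch of the batched flow described above
(not the literal upstream code): the function name, the vma iteration,
and the per-vma range clamping are simplified assumptions here; only
tlb_gather_mmu()/tlb_finish_mmu(), the vma iterator helpers, and the
zap_page_range_single_batched() signature from the hunk below are taken
as given.  The caller is assumed to hold mmap_lock, as madvise_lock()
arranges in the real code.

    #include <linux/mm.h>
    #include <asm/tlb.h>
    #include "internal.h"	/* zap_details, zap_page_range_single_batched() */

    /*
     * Sketch: one mmu_gather is initialized at the entry level, every
     * per-vma zap only gathers entries into it, and a single tlb flush
     * covers the whole user request.
     */
    static void madvise_dontneed_batched_sketch(struct mm_struct *mm,
    		unsigned long start, unsigned long end)
    {
    	struct zap_details details = { .even_cows = true };
    	struct vm_area_struct *vma;
    	struct mmu_gather tlb;
    	VMA_ITERATOR(vmi, mm, start);

    	tlb_gather_mmu(&tlb, mm);		/* initialize once per request */
    	for_each_vma_range(vmi, vma, end) {
    		unsigned long s = max(start, vma->vm_start);
    		unsigned long e = min(end, vma->vm_end);

    		/* gather tlb entries into &tlb; no per-vma flush here */
    		zap_page_range_single_batched(&tlb, vma, s, e - s, &details);
    	}
    	tlb_finish_mmu(&tlb);			/* one flush for the whole request */
    }

Compared with the previous code, the per-vma callee no longer goes
through zap_page_range_single(), which sets up and finishes its own
mmu_gather for every vma, so a request spanning many vmas pays for one
flush instead of one flush per vma.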

Link: https://lkml.kernel.org/r/20250410000022.1901-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/madvise.c
mm/memory.c

index 0cf1f534ee1a52800bcabe3c791eb088ac7632c8..780481a8be0ee219a75bd96ae9a464dea6c3aab8 100644 (file)
@@ -430,6 +430,9 @@ void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
                             struct zap_details *details);
+void zap_page_range_single_batched(struct mmu_gather *tlb,
+               struct vm_area_struct *vma, unsigned long addr,
+               unsigned long size, struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
                           gfp_t gfp);
 
index 951038a9f36fa8c2e4e4d1c33f92e77112cb26e7..8433ac9b27e097727155e7998a138b293925470a 100644 (file)
@@ -851,7 +851,8 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior,
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior,
+                                       struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end)
 {
        struct zap_details details = {
@@ -859,7 +860,8 @@ static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
                .even_cows = true,
        };
 
-       zap_page_range_single(vma, start, end - start, &details);
+       zap_page_range_single_batched(
+                       madv_behavior->tlb, vma, start, end - start, &details);
        return 0;
 }
 
@@ -950,7 +952,8 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
        }
 
        if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
-               return madvise_dontneed_single_vma(vma, start, end);
+               return madvise_dontneed_single_vma(
+                               madv_behavior, vma, start, end);
        else if (behavior == MADV_FREE)
                return madvise_free_single_vma(madv_behavior, vma, start, end);
        else
@@ -1628,6 +1631,8 @@ static void madvise_unlock(struct mm_struct *mm, int behavior)
 static bool madvise_batch_tlb_flush(int behavior)
 {
        switch (behavior) {
+       case MADV_DONTNEED:
+       case MADV_DONTNEED_LOCKED:
        case MADV_FREE:
                return true;
        default:
index 33d34722e3393283343fc3e5422800fb74cf8ad0..71c255f3fdccf8ba35ca0037023235fe89fe6bf6 100644 (file)
@@ -1998,7 +1998,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
        mmu_notifier_invalidate_range_end(&range);
 }
 
-/*
+/**
  * zap_page_range_single_batched - remove user pages in a given range
  * @tlb: pointer to the caller's struct mmu_gather
  * @vma: vm_area_struct holding the applicable pages
@@ -2009,7 +2009,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
  * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
  * hugetlb, @tlb is flushed and re-initialized by this function.
  */
-static void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_page_range_single_batched(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *details)
 {