Merge tag 'mm-stable-2023-08-28-18-26' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 47d1d32c734f4d739b4b9c54ba79d6167b4a47d3..88433cc25d8a5ab1095c931ea47786cd7e4ec2b4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -19,6 +19,7 @@
 #include <linux/page_table_check.h>
 #include <linux/swapops.h>
 #include <linux/shmem_fs.h>
+#include <linux/ksm.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -92,8 +93,6 @@ static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
-#define MAX_PTE_MAPPED_THP 8
-
 struct collapse_control {
        bool is_khugepaged;
 
@@ -107,15 +106,9 @@ struct collapse_control {
 /**
  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
  * @slot: hash lookup from mm to mm_slot
- * @nr_pte_mapped_thp: number of pte mapped THP
- * @pte_mapped_thp: address array corresponding pte mapped THP
  */
 struct khugepaged_mm_slot {
        struct mm_slot slot;
-
-       /* pte-mapped THP in this mm */
-       int nr_pte_mapped_thp;
-       unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 };
 
 /**
@@ -709,6 +702,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
                                spin_lock(ptl);
                                ptep_clear(vma->vm_mm, address, _pte);
                                spin_unlock(ptl);
+                               ksm_might_unmap_zero_page(vma->vm_mm, pteval);
                        }
                } else {
                        src_page = pte_page(pteval);
@@ -902,7 +896,7 @@ static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
                return false;
        }
 
-       prep_transhuge_page(*hpage);
+       folio_prep_large_rmappable((struct folio *)*hpage);
        count_vm_event(THP_COLLAPSE_ALLOC);
        return true;
 }
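
Not part of the patch: the (struct folio *) cast above is only valid because *hpage was just allocated as a compound head page, so its struct page doubles as the folio. A minimal sketch of an equivalent form that spells out that assumption:

	struct folio *folio = page_folio(*hpage);	/* fresh allocation: head page */

	VM_BUG_ON_PAGE(PageTail(*hpage), *hpage);	/* the cast assumes a head page */
	folio_prep_large_rmappable(folio);
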
@@ -1439,51 +1433,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 }
 
 #ifdef CONFIG_SHMEM
-/*
- * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
- * khugepaged should try to collapse the page table.
- *
- * Note that following race exists:
- * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
- *     emptying the A's ->pte_mapped_thp[] array.
- * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
- *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
- *     (at virtual address X) and adds an entry (for X) into mm_struct A's
- *     ->pte-mapped_thp[] array.
- * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
- *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
- *     (for X) into mm_struct A's ->pte-mapped_thp[] array.
- * Thus, it's possible the same address is added multiple times for the same
- * mm_struct.  Should this happen, we'll simply attempt
- * collapse_pte_mapped_thp() multiple times for the same address, under the same
- * exclusive mmap_lock, and assuming the first call is successful, subsequent
- * attempts will return quickly (without grabbing any additional locks) when
- * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
- * check, and since this is a rare occurrence, the cost of preventing this
- * "multiple-add" is thought to be more expensive than just handling it, should
- * it occur.
- */
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
-                                         unsigned long addr)
-{
-       struct khugepaged_mm_slot *mm_slot;
-       struct mm_slot *slot;
-       bool ret = false;
-
-       VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
-
-       spin_lock(&khugepaged_mm_lock);
-       slot = mm_slot_lookup(mm_slots_hash, mm);
-       mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
-       if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
-               mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
-               ret = true;
-       }
-       spin_unlock(&khugepaged_mm_lock);
-       return ret;
-}
-
-/* hpage must be locked, and mmap_lock must be held in write */
+/* hpage must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmdp, struct page *hpage)
 {
@@ -1495,7 +1445,7 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
        };
 
        VM_BUG_ON(!PageTransHuge(hpage));
-       mmap_assert_write_locked(vma->vm_mm);
+       mmap_assert_locked(vma->vm_mm);
 
        if (do_set_pmd(&vmf, hpage))
                return SCAN_FAIL;
@@ -1504,48 +1454,6 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
        return SCAN_SUCCEED;
 }
 
-/*
- * A note about locking:
- * Trying to take the page table spinlocks would be useless here because those
- * are only used to synchronize:
- *
- *  - modifying terminal entries (ones that point to a data page, not to another
- *    page table)
- *  - installing *new* non-terminal entries
- *
- * Instead, we need roughly the same kind of protection as free_pgtables() or
- * mm_take_all_locks() (but only for a single VMA):
- * The mmap lock together with this VMA's rmap locks covers all paths towards
- * the page table entries we're messing with here, except for hardware page
- * table walks and lockless_pages_from_mm().
- */
-static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-                                 unsigned long addr, pmd_t *pmdp)
-{
-       pmd_t pmd;
-       struct mmu_notifier_range range;
-
-       mmap_assert_write_locked(mm);
-       if (vma->vm_file)
-               lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
-       /*
-        * All anon_vmas attached to the VMA have the same root and are
-        * therefore locked by the same lock.
-        */
-       if (vma->anon_vma)
-               lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
-
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
-                               addr + HPAGE_PMD_SIZE);
-       mmu_notifier_invalidate_range_start(&range);
-       pmd = pmdp_collapse_flush(vma, addr, pmdp);
-       tlb_remove_table_sync_one();
-       mmu_notifier_invalidate_range_end(&range);
-       mm_dec_nr_ptes(mm);
-       page_table_check_pte_clear_range(mm, addr, pmd);
-       pte_free(mm, pmd_pgtable(pmd));
-}
-
 /**
  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
  * address haddr.
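
Note, not part of the patch: after this series, collapse_pte_mapped_thp() only requires mmap_lock held for read (see the mmap_assert_locked() change below). A minimal caller-side sketch, mirroring the madvise_collapse() hunk at the end of this diff:

	int result;

	mmap_read_lock(mm);
	result = collapse_pte_mapped_thp(mm, addr, /* install_pmd */ true);
	mmap_read_unlock(mm);
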
@@ -1561,26 +1469,29 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                            bool install_pmd)
 {
+       struct mmu_notifier_range range;
+       bool notified = false;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        struct vm_area_struct *vma = vma_lookup(mm, haddr);
        struct page *hpage;
        pte_t *start_pte, *pte;
-       pmd_t *pmd;
-       spinlock_t *ptl;
-       int count = 0, result = SCAN_FAIL;
+       pmd_t *pmd, pgt_pmd;
+       spinlock_t *pml = NULL, *ptl;
+       int nr_ptes = 0, result = SCAN_FAIL;
        int i;
 
-       mmap_assert_write_locked(mm);
+       mmap_assert_locked(mm);
+
+       /* First check VMA found, in case page tables are being torn down */
+       if (!vma || !vma->vm_file ||
+           !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
+               return SCAN_VMA_CHECK;
 
        /* Fast check before locking page if already PMD-mapped */
        result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
        if (result == SCAN_PMD_MAPPED)
                return result;
 
-       if (!vma || !vma->vm_file ||
-           !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
-               return SCAN_VMA_CHECK;
-
        /*
         * If we are here, we've succeeded in replacing all the native pages
         * in the page cache with a single hugepage. If a mm were to fault-in
@@ -1610,41 +1521,24 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                goto drop_hpage;
        }
 
+       result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
        switch (result) {
        case SCAN_SUCCEED:
                break;
        case SCAN_PMD_NONE:
                /*
-                * In MADV_COLLAPSE path, possible race with khugepaged where
-                * all pte entries have been removed and pmd cleared.  If so,
-                * skip all the pte checks and just update the pmd mapping.
+                * All pte entries have been removed and pmd cleared.
+                * Skip all the pte checks and just update the pmd mapping.
                 */
                goto maybe_install_pmd;
        default:
                goto drop_hpage;
        }
 
-       /* Lock the vma before taking i_mmap and page table locks */
-       vma_start_write(vma);
-
-       /*
-        * We need to lock the mapping so that from here on, only GUP-fast and
-        * hardware page walks can access the parts of the page tables that
-        * we're operating on.
-        * See collapse_and_free_pmd().
-        */
-       i_mmap_lock_write(vma->vm_file->f_mapping);
-
-       /*
-        * This spinlock should be unnecessary: Nobody else should be accessing
-        * the page tables under spinlock protection here, only
-        * lockless_pages_from_mm() and the hardware page walker can access page
-        * tables while all the high-level locks are held in write mode.
-        */
        result = SCAN_FAIL;
        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
-       if (!start_pte)
-               goto drop_immap;
+       if (!start_pte)         /* mmap_lock + page lock should prevent this */
+               goto drop_hpage;
 
        /* step 1: check all mapped PTEs are to the right huge page */
        for (i = 0, addr = haddr, pte = start_pte;
@@ -1671,10 +1565,34 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                 */
                if (hpage + i != page)
                        goto abort;
-               count++;
        }
 
-       /* step 2: adjust rmap */
+       pte_unmap_unlock(start_pte, ptl);
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
+                               haddr, haddr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
+       notified = true;
+
+       /*
+        * pmd_lock covers a wider range than ptl, and (if split from mm's
+        * page_table_lock) ptl nests inside pml. The less time we hold pml,
+        * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
+        * inserts a valid as-if-COWed PTE without even looking up page cache.
+        * So page lock of hpage does not protect from it, so we must not drop
+        * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
+        */
+       if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
+               pml = pmd_lock(mm, pmd);
+
+       start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
+       if (!start_pte)         /* mmap_lock + page lock should prevent this */
+               goto abort;
+       if (!pml)
+               spin_lock(ptl);
+       else if (ptl != pml)
+               spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+
+       /* step 2: clear page table and adjust rmap */
        for (i = 0, addr = haddr, pte = start_pte;
             i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
                struct page *page;
@@ -1682,189 +1600,164 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 
                if (pte_none(ptent))
                        continue;
+               /*
+                * We dropped ptl after the first scan, to do the mmu_notifier:
+                * page lock stops more PTEs of the hpage being faulted in, but
+                * does not stop write faults COWing anon copies from existing
+                * PTEs; and does not stop those being swapped out or migrated.
+                */
+               if (!pte_present(ptent)) {
+                       result = SCAN_PTE_NON_PRESENT;
+                       goto abort;
+               }
                page = vm_normal_page(vma, addr, ptent);
-               if (WARN_ON_ONCE(page && is_zone_device_page(page)))
+               if (hpage + i != page)
                        goto abort;
+
+               /*
+                * Must clear entry, or a racing truncate may re-remove it.
+                * TLB flush can be left until pmdp_collapse_flush() does it.
+                * PTE dirty? Shmem page is already dirty; file is read-only.
+                */
+               ptep_clear(mm, addr, pte);
                page_remove_rmap(page, vma, false);
+               nr_ptes++;
        }
 
-       pte_unmap_unlock(start_pte, ptl);
+       pte_unmap(start_pte);
+       if (!pml)
+               spin_unlock(ptl);
 
        /* step 3: set proper refcount and mm_counters. */
-       if (count) {
-               page_ref_sub(hpage, count);
-               add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
+       if (nr_ptes) {
+               page_ref_sub(hpage, nr_ptes);
+               add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
        }
 
-       /* step 4: remove pte entries */
-       /* we make no change to anon, but protect concurrent anon page lookup */
-       if (vma->anon_vma)
-               anon_vma_lock_write(vma->anon_vma);
+       /* step 4: remove empty page table */
+       if (!pml) {
+               pml = pmd_lock(mm, pmd);
+               if (ptl != pml)
+                       spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+       }
+       pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
+       pmdp_get_lockless_sync();
+       if (ptl != pml)
+               spin_unlock(ptl);
+       spin_unlock(pml);
 
-       collapse_and_free_pmd(mm, vma, haddr, pmd);
+       mmu_notifier_invalidate_range_end(&range);
 
-       if (vma->anon_vma)
-               anon_vma_unlock_write(vma->anon_vma);
-       i_mmap_unlock_write(vma->vm_file->f_mapping);
+       mm_dec_nr_ptes(mm);
+       page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
+       pte_free_defer(mm, pmd_pgtable(pgt_pmd));
 
 maybe_install_pmd:
        /* step 5: install pmd entry */
        result = install_pmd
                        ? set_huge_pmd(vma, haddr, pmd, hpage)
                        : SCAN_SUCCEED;
-
+       goto drop_hpage;
+abort:
+       if (nr_ptes) {
+               flush_tlb_mm(mm);
+               page_ref_sub(hpage, nr_ptes);
+               add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+       }
+       if (start_pte)
+               pte_unmap_unlock(start_pte, ptl);
+       if (pml && pml != ptl)
+               spin_unlock(pml);
+       if (notified)
+               mmu_notifier_invalidate_range_end(&range);
 drop_hpage:
        unlock_page(hpage);
        put_page(hpage);
        return result;
-
-abort:
-       pte_unmap_unlock(start_pte, ptl);
-drop_immap:
-       i_mmap_unlock_write(vma->vm_file->f_mapping);
-       goto drop_hpage;
-}
-
-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
-       struct mm_slot *slot = &mm_slot->slot;
-       struct mm_struct *mm = slot->mm;
-       int i;
-
-       if (likely(mm_slot->nr_pte_mapped_thp == 0))
-               return;
-
-       if (!mmap_write_trylock(mm))
-               return;
-
-       if (unlikely(hpage_collapse_test_exit(mm)))
-               goto out;
-
-       for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
-               collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
-
-out:
-       mm_slot->nr_pte_mapped_thp = 0;
-       mmap_write_unlock(mm);
 }
 
-static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
-                              struct mm_struct *target_mm,
-                              unsigned long target_addr, struct page *hpage,
-                              struct collapse_control *cc)
+static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
        struct vm_area_struct *vma;
-       int target_result = SCAN_FAIL;
 
-       i_mmap_lock_write(mapping);
+       i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-               int result = SCAN_FAIL;
-               struct mm_struct *mm = NULL;
-               unsigned long addr = 0;
-               pmd_t *pmd;
-               bool is_target = false;
+               struct mmu_notifier_range range;
+               struct mm_struct *mm;
+               unsigned long addr;
+               pmd_t *pmd, pgt_pmd;
+               spinlock_t *pml;
+               spinlock_t *ptl;
+               bool skipped_uffd = false;
 
                /*
                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
-                * got written to. These VMAs are likely not worth investing
-                * mmap_write_lock(mm) as PMD-mapping is likely to be split
-                * later.
-                *
-                * Note that vma->anon_vma check is racy: it can be set up after
-                * the check but before we took mmap_lock by the fault path.
-                * But page lock would prevent establishing any new ptes of the
-                * page, so we are safe.
-                *
-                * An alternative would be drop the check, but check that page
-                * table is clear before calling pmdp_collapse_flush() under
-                * ptl. It has higher chance to recover THP for the VMA, but
-                * has higher cost too. It would also probably require locking
-                * the anon_vma.
+                * got written to. These VMAs are likely not worth removing
+                * page tables from, as PMD-mapping is likely to be split later.
                 */
-               if (READ_ONCE(vma->anon_vma)) {
-                       result = SCAN_PAGE_ANON;
-                       goto next;
-               }
+               if (READ_ONCE(vma->anon_vma))
+                       continue;
+
                addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                if (addr & ~HPAGE_PMD_MASK ||
-                   vma->vm_end < addr + HPAGE_PMD_SIZE) {
-                       result = SCAN_VMA_CHECK;
-                       goto next;
-               }
+                   vma->vm_end < addr + HPAGE_PMD_SIZE)
+                       continue;
+
                mm = vma->vm_mm;
-               is_target = mm == target_mm && addr == target_addr;
-               result = find_pmd_or_thp_or_none(mm, addr, &pmd);
-               if (result != SCAN_SUCCEED)
-                       goto next;
+               if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
+                       continue;
+
+               if (hpage_collapse_test_exit(mm))
+                       continue;
                /*
-                * We need exclusive mmap_lock to retract page table.
-                *
-                * We use trylock due to lock inversion: we need to acquire
-                * mmap_lock while holding page lock. Fault path does it in
-                * reverse order. Trylock is a way to avoid deadlock.
-                *
-                * Also, it's not MADV_COLLAPSE's job to collapse other
-                * mappings - let khugepaged take care of them later.
+                * When a vma is registered with uffd-wp, we cannot recycle
+                * the page table because there may be pte markers installed.
+                * Other vmas can still have the same file mapped hugely, but
+                * skip this one: it will always be mapped in small page size
+                * for uffd-wp registered ranges.
                 */
-               result = SCAN_PTE_MAPPED_HUGEPAGE;
-               if ((cc->is_khugepaged || is_target) &&
-                   mmap_write_trylock(mm)) {
-                       /* trylock for the same lock inversion as above */
-                       if (!vma_try_start_write(vma))
-                               goto unlock_next;
+               if (userfaultfd_wp(vma))
+                       continue;
+
+               /* PTEs were notified when unmapped; but now for the PMD? */
+               mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
+                                       addr, addr + HPAGE_PMD_SIZE);
+               mmu_notifier_invalidate_range_start(&range);
+
+               pml = pmd_lock(mm, pmd);
+               ptl = pte_lockptr(mm, pmd);
+               if (ptl != pml)
+                       spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
 
-                       /*
-                        * Re-check whether we have an ->anon_vma, because
-                        * collapse_and_free_pmd() requires that either no
-                        * ->anon_vma exists or the anon_vma is locked.
-                        * We already checked ->anon_vma above, but that check
-                        * is racy because ->anon_vma can be populated under the
-                        * mmap lock in read mode.
-                        */
-                       if (vma->anon_vma) {
-                               result = SCAN_PAGE_ANON;
-                               goto unlock_next;
-                       }
-                       /*
-                        * When a vma is registered with uffd-wp, we can't
-                        * recycle the pmd pgtable because there can be pte
-                        * markers installed.  Skip it only, so the rest mm/vma
-                        * can still have the same file mapped hugely, however
-                        * it'll always mapped in small page size for uffd-wp
-                        * registered ranges.
-                        */
-                       if (hpage_collapse_test_exit(mm)) {
-                               result = SCAN_ANY_PROCESS;
-                               goto unlock_next;
-                       }
-                       if (userfaultfd_wp(vma)) {
-                               result = SCAN_PTE_UFFD_WP;
-                               goto unlock_next;
-                       }
-                       collapse_and_free_pmd(mm, vma, addr, pmd);
-                       if (!cc->is_khugepaged && is_target)
-                               result = set_huge_pmd(vma, addr, pmd, hpage);
-                       else
-                               result = SCAN_SUCCEED;
-
-unlock_next:
-                       mmap_write_unlock(mm);
-                       goto next;
-               }
                /*
-                * Calling context will handle target mm/addr. Otherwise, let
-                * khugepaged try again later.
+                * Huge page lock is still held, so normally the page table
+                * must remain empty; and we have already skipped anon_vma
+                * and userfaultfd_wp() vmas.  But since the mmap_lock is not
+                * held, it is still possible for a racing userfaultfd_ioctl()
+                * to have inserted ptes or markers.  Now that we hold ptlock,
+                * repeating the anon_vma check protects from one category,
+                * and repeating the userfaultfd_wp() check from another.
                 */
-               if (!is_target) {
-                       khugepaged_add_pte_mapped_thp(mm, addr);
-                       continue;
+               if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
+                       skipped_uffd = true;
+               } else {
+                       pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
+                       pmdp_get_lockless_sync();
+               }
+
+               if (ptl != pml)
+                       spin_unlock(ptl);
+               spin_unlock(pml);
+
+               mmu_notifier_invalidate_range_end(&range);
+
+               if (!skipped_uffd) {
+                       mm_dec_nr_ptes(mm);
+                       page_table_check_pte_clear_range(mm, addr, pgt_pmd);
+                       pte_free_defer(mm, pmd_pgtable(pgt_pmd));
                }
-next:
-               if (is_target)
-                       target_result = result;
        }
-       i_mmap_unlock_write(mapping);
-       return target_result;
+       i_mmap_unlock_read(mapping);
 }
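
Not part of the patch: both collapse_pte_mapped_thp() and retract_page_tables() above now free an empty page table under the same lock nesting, PMD lock outside the (possibly split) PTE lock, without mmap_lock held for write. A condensed sketch of that pattern, using only calls that appear in the hunks above (the mmu_notifier invalidation that brackets the flush in both callers is omitted here):

	spinlock_t *pml, *ptl;
	pmd_t pgt_pmd;

	pml = pmd_lock(mm, pmd);			/* outer: PMD-level lock */
	ptl = pte_lockptr(mm, pmd);			/* PTE lock of this page table */
	if (ptl != pml)					/* split ptlocks: nest the inner lock */
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

	pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);	/* clear pmd, flush TLB */
	pmdp_get_lockless_sync();			/* wait out lockless pmd walkers */

	if (ptl != pml)
		spin_unlock(ptl);
	spin_unlock(pml);

	mm_dec_nr_ptes(mm);
	pte_free_defer(mm, pmd_pgtable(pgt_pmd));	/* RCU-deferred free of the page table */
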
 
 /**
@@ -2072,8 +1965,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                        goto out_unlock;
                }
 
-               if (folio_has_private(folio) &&
-                   !filemap_release_folio(folio, GFP_KERNEL)) {
+               if (!filemap_release_folio(folio, GFP_KERNEL)) {
                        result = SCAN_PAGE_HAS_PRIVATE;
                        folio_putback_lru(folio);
                        goto out_unlock;
@@ -2260,9 +2152,11 @@ immap_locked:
 
        /*
         * Remove pte page tables, so we can re-fault the page as huge.
+        * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
         */
-       result = retract_page_tables(mapping, start, mm, addr, hpage,
-                                    cc);
+       retract_page_tables(mapping, start);
+       if (cc && !cc->is_khugepaged)
+               result = SCAN_PTE_MAPPED_HUGEPAGE;
        unlock_page(hpage);
 
        /*
@@ -2423,16 +2317,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 {
        BUILD_BUG();
 }
-
-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
-}
-
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
-                                         unsigned long addr)
-{
-       return false;
-}
 #endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
@@ -2462,7 +2346,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
                khugepaged_scan.mm_slot = mm_slot;
        }
        spin_unlock(&khugepaged_mm_lock);
-       khugepaged_collapse_pte_mapped_thps(mm_slot);
 
        mm = slot->mm;
        /*
@@ -2515,36 +2398,27 @@ skip:
                                                khugepaged_scan.address);
 
                                mmap_read_unlock(mm);
-                               *result = hpage_collapse_scan_file(mm,
-                                                                  khugepaged_scan.address,
-                                                                  file, pgoff, cc);
                                mmap_locked = false;
+                               *result = hpage_collapse_scan_file(mm,
+                                       khugepaged_scan.address, file, pgoff, cc);
                                fput(file);
+                               if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
+                                       mmap_read_lock(mm);
+                                       if (hpage_collapse_test_exit(mm))
+                                               goto breakouterloop;
+                                       *result = collapse_pte_mapped_thp(mm,
+                                               khugepaged_scan.address, false);
+                                       if (*result == SCAN_PMD_MAPPED)
+                                               *result = SCAN_SUCCEED;
+                                       mmap_read_unlock(mm);
+                               }
                        } else {
                                *result = hpage_collapse_scan_pmd(mm, vma,
-                                                                 khugepaged_scan.address,
-                                                                 &mmap_locked,
-                                                                 cc);
+                                       khugepaged_scan.address, &mmap_locked, cc);
                        }
-                       switch (*result) {
-                       case SCAN_PTE_MAPPED_HUGEPAGE: {
-                               pmd_t *pmd;
-
-                               *result = find_pmd_or_thp_or_none(mm,
-                                                                 khugepaged_scan.address,
-                                                                 &pmd);
-                               if (*result != SCAN_SUCCEED)
-                                       break;
-                               if (!khugepaged_add_pte_mapped_thp(mm,
-                                                                  khugepaged_scan.address))
-                                       break;
-                       } fallthrough;
-                       case SCAN_SUCCEED:
+
+                       if (*result == SCAN_SUCCEED)
                                ++khugepaged_pages_collapsed;
-                               break;
-                       default:
-                               break;
-                       }
 
                        /* move to next address */
                        khugepaged_scan.address += HPAGE_PMD_SIZE;
@@ -2890,9 +2764,9 @@ handle_result:
                case SCAN_PTE_MAPPED_HUGEPAGE:
                        BUG_ON(mmap_locked);
                        BUG_ON(*prev);
-                       mmap_write_lock(mm);
+                       mmap_read_lock(mm);
                        result = collapse_pte_mapped_thp(mm, addr, true);
-                       mmap_write_unlock(mm);
+                       mmap_read_unlock(mm);
                        goto handle_result;
                /* Whitelisted set of results where continuing OK */
                case SCAN_PMD_NULL:
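
Not part of the patch: the hunk above is reached from userspace via madvise(2) with MADV_COLLAPSE. A minimal userspace sketch of the syscall shape (assumes a kernel with MADV_COLLAPSE and a 2MB PMD size; the SCAN_PTE_MAPPED_HUGEPAGE case specifically needs a file- or shmem-backed mapping, which this anonymous-memory example does not exercise):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value, for older libc headers */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumed PMD size: 2MB (x86_64, 4K pages) */
	void *buf;

	if (posix_memalign(&buf, len, len))	/* PMD-aligned region */
		return 1;
	memset(buf, 0, len);			/* fault in small pages first */

	if (madvise(buf, len, MADV_COLLAPSE))	/* ask for a synchronous collapse */
		perror("madvise(MADV_COLLAPSE)");

	free(buf);
	return 0;
}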