mmap locking API: convert mmap_sem API comments
author     Michel Lespinasse <walken@google.com>
           Tue, 9 Jun 2020 04:33:51 +0000 (21:33 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 9 Jun 2020 16:39:14 +0000 (09:39 -0700)
Convert comments that reference old mmap_sem APIs to reference
corresponding new mmap locking APIs instead.
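
For reference, the new wrappers map onto the old rwsem calls roughly as
follows. This is an abridged sketch of include/linux/mmap_lock.h as
introduced earlier in this series, shown here for context only; it is not
part of this patch:

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);   /* was down_read(&mm->mmap_sem) */
	}

	static inline bool mmap_read_trylock(struct mm_struct *mm)
	{
		/* was down_read_trylock(&mm->mmap_sem) */
		return down_read_trylock(&mm->mmap_lock) != 0;
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);     /* was up_read(&mm->mmap_sem) */
	}

	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);  /* was down_write(&mm->mmap_sem) */
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);    /* was up_write(&mm->mmap_sem) */
	}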

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-12-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
29 files changed:
Documentation/vm/hmm.rst
arch/alpha/mm/fault.c
arch/ia64/mm/fault.c
arch/m68k/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/mm/fault.c
arch/nds32/mm/fault.c
arch/nios2/mm/fault.c
arch/openrisc/mm/fault.c
arch/parisc/mm/fault.c
arch/riscv/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/xtensa/mm/fault.c
drivers/android/binder_alloc.c
fs/hugetlbfs/inode.c
fs/userfaultfd.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/khugepaged.c
mm/ksm.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/oom_kill.c
net/ipv4/tcp.c

index 561969754bc08b6d33009c996db923dd7d1edac0..6f9e000757fa07e4f0647259f88e81b71afe8cdf 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -191,15 +191,15 @@ The usage pattern is::
 
  again:
       range.notifier_seq = mmu_interval_read_begin(&interval_sub);
-      down_read(&mm->mmap_sem);
+      mmap_read_lock(mm);
       ret = hmm_range_fault(&range);
       if (ret) {
-          up_read(&mm->mmap_sem);
+          mmap_read_unlock(mm);
           if (ret == -EBUSY)
                  goto again;
           return ret;
       }
-      up_read(&mm->mmap_sem);
+      mmap_read_unlock(mm);
 
       take_lock(driver->update);
       if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
index 36efa778ee1ac5625e68cc5c757d9bb4c9f5ce42..c2303a8c2b9f7ca40cdb7d01e03fb0a7bde8a2a9 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -171,7 +171,7 @@ retry:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                        /* No need to up_read(&mm->mmap_sem) as we would
+                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
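
The "No need to mmap_read_unlock(mm)" comment above recurs in the fault
handlers of more than a dozen architectures below. Condensed, the retry
path they share looks roughly like this (a simplified composite sketch,
not verbatim from any single architecture):

	retry:
		mmap_read_lock(mm);
		vma = find_vma(mm, address);
		/* ... access checks elided ... */
		fault = handle_mm_fault(vma, address, flags);
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			/* handle_mm_fault() already dropped the mmap lock
			 * for us via __lock_page_or_retry(), so retry
			 * without unlocking here. */
			goto retry;
		}
		mmap_read_unlock(mm);
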
index e9ce969c8b73ea406fdd2daa7c3a0876a45bb63e..6c09f43d971127a1eff7d3fbd02150eed45f051a 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -173,7 +173,7 @@ retry:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                        /* No need to up_read(&mm->mmap_sem) as we would
+                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 650acab0d77dfb85e0e643c745660285f22d7247..a94a814ad6adb49572e498cdcb599804d9f17e90 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -165,7 +165,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 952ab614d50ed53cea9853e0b30feb6189cd9142..74358902a5dbff4d23ec1dfeb1c877d2e7482374 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -238,7 +238,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 9ef2dd39111ed1fa888b86806ae10e892c215b54..01b168a90434aa564a20628a4586b5df9458864a 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -181,7 +181,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index b92785588c30b730932b6728427df3f4d6c1a888..89831b6e1edeb6da4244fb97824d7cca15d47de9 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -247,7 +247,7 @@ good_area:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                       /* No need to up_read(&mm->mmap_sem) as we would
+                       /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index b8a0b51c6b0f3b1918d03cbb5e2516e479828f86..4112ef0e247ee438308968f45daaf9bbfbb679e6 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -160,7 +160,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 0bbb1a76949a5d7169c98e563873c5c63161735c..d2224ccca2941f4777c3f4aee06eb02db39f1e98 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -183,7 +183,7 @@ good_area:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                        /* No need to up_read(&mm->mmap_sem) as we would
+                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index bc840fdb398f895f61e13aa659cf5a7213057c29..66ac0719bd4927eb8dca1288728ddc2dd25d5f6a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -329,7 +329,7 @@ good_area:
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index cd7f4af95e56e6ead8625322d898588ddebd5f9e..996db5ebbf39e1642e4253130dfc6db2bd3f624d 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -147,7 +147,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index d0c0d53512802067303f5fb437e0e75facb3144d..3a125aad586dbf0973d7a868da6fe7a5e926a6b1 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -502,7 +502,7 @@ good_area:
                        flags |= FAULT_FLAG_TRIED;
 
                        /*
-                        * No need to up_read(&mm->mmap_sem) as we would
+                        * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 34588c4ab9d9438178a43e2930f9f319c99cf56c..cfef656eda0f948f814e943c313f4bf66c9b97be 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -262,7 +262,7 @@ good_area:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                       /* No need to up_read(&mm->mmap_sem) as we would
+                       /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 417d7f677eb3a3546f66a4aaa19f2a05da072ffe..73f95a5ba6837cef695f2fa922823cb173c35eec 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -450,7 +450,7 @@ good_area:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                       /* No need to up_read(&mm->mmap_sem) as we would
+                       /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index 1c8d22a0cf4618997d2ffb27d7995229c40ac5a0..c4decc73fd86e0b9fa59abd80ea5a3b2be946b33 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -130,7 +130,7 @@ good_area:
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
-                        /* No need to up_read(&mm->mmap_sem) as we would
+                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
index cbdc43ed0f9f5e07e46084ebec2297d7c3ddf472..42c672f1584e909544cc496a46994784ea172d5f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -933,7 +933,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        if (!mmget_not_zero(mm))
                goto err_mmget;
        if (!mmap_read_trylock(mm))
-               goto err_down_read_mmap_sem_failed;
+               goto err_mmap_read_lock_failed;
        vma = binder_alloc_get_vma(alloc);
 
        list_lru_isolate(lru, item);
@@ -960,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;
 
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
        mmput_async(mm);
 err_mmget:
 err_page_already_freed:
index f3420a643b4f3a7225c8e3871bc3522e9dc2ee83..ef5313f9c78fee07a8d318453a58e8f20e7793a3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -187,7 +187,7 @@ out:
 }
 
 /*
- * Called under down_write(mmap_sem).
+ * Called under mmap_write_lock(mm).
  */
 
 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
index 12b4924090406af1bb0b252c0e72e0b784df5cf8..3a63d75ed2fdb9eb6be79d57ea29bd2d94cafbf2 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
        /*
         * To be sure waitqueue_active() is not reordered by the CPU
         * before the pagetable update, use an explicit SMP memory
-        * barrier here. PT lock release or up_read(mmap_sem) still
+        * barrier here. PT lock release or mmap_read_unlock(mm) still
         * have release semantics that can allow the
         * waitqueue_active() to be reordered before the pte update.
         */
index b1442c4b36b7d518820dde51629b563e0131ecac..950cf12a10fc8046295989df2dfda1a3a160cd5c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1373,7 +1373,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
  * Return values:
  * 1 - page is locked; mmap_sem is still held.
  * 0 - page is not locked.
- *     mmap_sem has been released (up_read()), unless flags had both
+ *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
  *     which case mmap_sem is still held.
  *
index 762fb9f733b2c7d2124d06571f6e433bc8d9567f..bbb8851f4656eafaff5417bf6ede7f74a5cf01da 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1993,19 +1993,19 @@ EXPORT_SYMBOL(get_user_pages);
 /**
  * get_user_pages_locked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  *  to:
  *
  *      int locked = 1;
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
  *      if (locked)
- *          up_read(&mm->mmap_sem);
+ *          mmap_read_unlock(mm);
  *
  * @start:      starting user address
  * @nr_pages:   number of pages from start to pin
@@ -2050,9 +2050,9 @@ EXPORT_SYMBOL(get_user_pages_locked);
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  *  with:
  *
index d9b2e0e0580a23fccf92f2fb656695eec8543056..de201f0b5a4af0155bd3a71f95014d8a3bd440c1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                goto unlock;
 
        /*
-        * In case prot_numa, we are under down_read(mmap_sem). It's critical
+        * In case prot_numa, we are under mmap_read_lock(mm). It's critical
         * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-        * which is also under down_read(mmap_sem):
+        * which is also under mmap_read_lock(mm):
         *
         *      CPU0:                           CPU1:
         *                              change_huge_pmd(prot_numa=1)
index 19f3401e568a419161936d99537db1a52393d45d..2c318ad1db20183270fdae95bdfe8b512cc0af29 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1543,7 +1543,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                /*
                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
                 * got written to. These VMAs are likely not worth investing
-                * down_write(mmap_sem) as PMD-mapping is likely to be split
+                * mmap_write_lock(mm) as PMD-mapping is likely to be split
                 * later.
                 *
                 * Note that vma->anon_vma check is racy: it can be set up after
index 098b580e7d76d124dfd0a7c7165020dc2d666e69..3efe7f28cc3f0299e23c652aad4d627a18ba451c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2362,7 +2362,7 @@ next_mm:
        } else {
                mmap_read_unlock(mm);
                /*
-                * up_read(&mm->mmap_sem) first because after
+                * mmap_read_unlock(mm) first because after
                 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
                 * already have been freed under us by __ksm_exit()
                 * because the "mm_slot" is still hashed and
index 823982a8f0b0c8bea6d0ab3f539fb9996b506b5b..4e2e17bb128102a25e64468543d07b9965f6117a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3323,10 +3323,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
         * pte_offset_map() on pmds where a huge pmd might be created
         * from a different thread.
         *
-        * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+        * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
         * parallel threads are excluded by other means.
         *
-        * Here we only have down_read(mmap_sem).
+        * Here we only have mmap_read_lock(mm).
         */
        if (pte_alloc(vma->vm_mm, vmf->pmd))
                return VM_FAULT_OOM;
index 4930a9254068966779d7ca804d35fc7c6dda6a6a..a38cd4cc32060294a240d810cf7880150867543f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2185,7 +2185,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *
  *     This function allocates a page from the kernel page pool and applies
  *     a NUMA policy associated with the VMA or the current process.
- *     When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ *     When VMA is not NULL caller must read-lock the mmap_lock of the
  *     mm_struct of the VMA to prevent it from going away. Should be used for
  *     all allocations for pages that will be mapped into user space. Returns
  *     NULL when no page can be allocated.
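
The locking rule stated above, as a minimal hypothetical caller (a
sketch assuming the mmap_lock wrappers shown earlier, not code from
this patch):

	mmap_read_lock(mm);	/* keep the VMA's mm_struct stable */
	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	mmap_read_unlock(mm);
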
index 0aa8f83789c5b67077791b7e2a623e8846b2e747..f69b09e0829c6437f00936f97a49c1b4656127cc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2772,10 +2772,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
         * pte_offset_map() on pmds where a huge pmd might be created
         * from a different thread.
         *
-        * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+        * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
         * parallel threads are excluded by other means.
         *
-        * Here we only have down_read(mmap_sem).
+        * Here we only have mmap_read_lock(mm).
         */
        if (pte_alloc(mm, pmdp))
                goto abort;
index a28778da76a3a30328d21b76b9762170774d90b7..79005049fbfcb2bfdd67e6d08b232a492a4b92b3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1361,7 +1361,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 }
 
 /*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
index af3de7a92a9f36cd1277bb77174c692f1fbeb127..3b5d78dfebe9e5d650768da9b10c0342b4f7242e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -577,8 +577,8 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
        /*
         * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
         * work on the mm anymore. The check for MMF_OOM_SKIP must run
-        * under mmap_sem for reading because it serializes against the
-        * down_write();up_write() cycle in exit_mmap().
+        * under mmap_lock for reading because it serializes against the
+        * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
         */
        if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
                trace_skip_task_reaping(tsk->pid);
@@ -611,7 +611,7 @@ static void oom_reap_task(struct task_struct *tsk)
        int attempts = 0;
        struct mm_struct *mm = tsk->signal->oom_mm;
 
-       /* Retry the down_read_trylock(mmap_sem) a few times */
+       /* Retry the mmap_read_trylock(mm) a few times */
        while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
                schedule_timeout_idle(HZ/10);
 
@@ -629,7 +629,7 @@ done:
 
        /*
         * Hide this mm from OOM killer because it has been either reaped or
-        * somebody can't call up_write(mmap_sem).
+        * somebody can't call mmap_write_unlock(mm).
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
 
index 1714fe20ec8021fccaec0482f1751e3661ca8896..27716e4932bcd13cabb6299b01feef820b92c36c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1734,7 +1734,7 @@ int tcp_mmap(struct file *file, struct socket *sock,
                return -EPERM;
        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 
-       /* Instruct vm_insert_page() to not down_read(mmap_sem) */
+       /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
        vma->vm_flags |= VM_MIXEDMAP;
 
        vma->vm_ops = &tcp_vm_ops;