mm: delete mmap_write_trylock() and vma_try_start_write()
author Hugh Dickins <hughd@google.com>
Wed, 12 Jul 2023 04:48:48 +0000 (21:48 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:25 +0000 (10:12 -0700)
mmap_write_trylock() and vma_try_start_write() were added just for
khugepaged, but now it has no use for them: delete.

Link: https://lkml.kernel.org/r/4e6db3d-e8e-73fb-1f2a-8de2dab2a87c@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huang, Ying <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mmap_lock.h

index d1ad22980ebe7ab0fd06986c976ac6bb628c414c..bfb46483108cddcb096766c35818172a709a4044 100644 (file)
@@ -709,21 +709,6 @@ static inline void vma_start_write(struct vm_area_struct *vma)
        up_write(&vma->vm_lock->lock);
 }
 
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
-{
-       int mm_lock_seq;
-
-       if (__is_vma_write_locked(vma, &mm_lock_seq))
-               return true;
-
-       if (!down_write_trylock(&vma->vm_lock->lock))
-               return false;
-
-       WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
-       up_write(&vma->vm_lock->lock);
-       return true;
-}
-
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
        int mm_lock_seq;
@@ -748,8 +733,6 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
                { return false; }
 static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
-               { return true; }
 static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
 static inline void vma_mark_detached(struct vm_area_struct *vma,
                                     bool detached) {}
index e05e167dbd166eafa4d5796e700d67a357fa3b6f..a5c63b6d7d46c1b64f770607373657d9a7b07ee5 100644 (file)
@@ -118,16 +118,6 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
        return ret;
 }
 
-static inline bool mmap_write_trylock(struct mm_struct *mm)
-{
-       bool ret;
-
-       __mmap_lock_trace_start_locking(mm, true);
-       ret = down_write_trylock(&mm->mmap_lock) != 0;
-       __mmap_lock_trace_acquire_returned(mm, true, ret);
-       return ret;
-}
-
 static inline void mmap_write_unlock(struct mm_struct *mm)
 {
        __mmap_lock_trace_released(mm, true);