mm: shmem: rename shmem_is_huge() to shmem_huge_global_enabled()
author     Baolin Wang <baolin.wang@linux.alibaba.com>
           Mon, 22 Jul 2024 05:43:18 +0000 (13:43 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 2 Sep 2024 03:25:44 +0000 (20:25 -0700)
shmem_is_huge() is now used only to check whether the top-level huge page
setting is enabled, so rename it to reflect that usage.

Link: https://lkml.kernel.org/r/da53296e0ab6359aa083561d9dc01e4223d60fbe.1721626645.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/shmem_fs.h
mm/huge_memory.c
mm/shmem.c
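
For context: the renamed helper still answers only the global question, while
per-size (mTHP) order selection remains in shmem_allowable_huge_orders(). A
condensed sketch of the caller pattern, simplified from the shmem fault-path
hunk below (locals and error handling omitted):

	bool global_huge;
	unsigned long orders;

	/* Global policy: the shmem_enabled sysfs knob and per-mount huge= option. */
	global_huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
						vma ? vma->vm_flags : 0);

	/* Per-size policy: which mTHP orders are still allowed for anonymous shmem. */
	if (vma && vma_is_anon_shmem(vma))
		orders = shmem_allowable_huge_orders(inode, vma, index, global_huge);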

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 1d06b1e5408a53aca0f61f4285175179c5bb7c55..405ee8d3589a5338c509ebe4fba4eb474a4c0873 100644
@@ -111,14 +111,15 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-                         struct mm_struct *mm, unsigned long vm_flags);
+extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+                                     struct mm_struct *mm, unsigned long vm_flags);
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
                                struct vm_area_struct *vma, pgoff_t index,
                                bool global_huge);
 #else
-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-                                         struct mm_struct *mm, unsigned long vm_flags)
+static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                                                     bool shmem_huge_force, struct mm_struct *mm,
+                                                     unsigned long vm_flags)
 {
        return false;
 }
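
The header keeps the standard pattern for config-gated helpers: with
CONFIG_TRANSPARENT_HUGEPAGE disabled, the function collapses to a
constant-false inline, so call sites need no #ifdef guards and the compiler
can drop the dependent code. A generic sketch of the idiom, with hypothetical
CONFIG_FEATURE_X/feature_x_enabled() names:

#ifdef CONFIG_FEATURE_X
extern bool feature_x_enabled(struct inode *inode);
#else
/* Build without the feature: constant false, trivially inlined away. */
static __always_inline bool feature_x_enabled(struct inode *inode)
{
	return false;
}
#endif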
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67c86a5d64a6a9825092c54ba801c91fa30121db..3af9366b1c4c82dd7b39af0f728672c72fac92e6 100644
@@ -160,8 +160,9 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
         * own flags.
         */
        if (!in_pf && shmem_file(vma->vm_file)) {
-               bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
-                                                       !enforce_sysfs, vma->vm_mm, vm_flags);
+               bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
+                                                            vma->vm_pgoff, !enforce_sysfs,
+                                                            vma->vm_mm, vm_flags);
 
                if (!vma_is_anon_shmem(vma))
                        return global_huge ? orders : 0;
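
The surrounding branch in __thp_vma_allowable_orders() uses the returned flag
in two ways: for a regular tmpfs mapping, the global flag alone gates every
order; for anonymous shmem, control continues into the per-size checks.
Roughly, assuming the rest of the function matches kernels of this era:

	if (!in_pf && shmem_file(vma->vm_file)) {
		bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
							     vma->vm_pgoff, !enforce_sysfs,
							     vma->vm_mm, vm_flags);

		/* tmpfs file mapping: the global flag decides all orders. */
		if (!vma_is_anon_shmem(vma))
			return global_huge ? orders : 0;

		/* Anonymous shmem: defer to the per-size mTHP order checks. */
		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
						   vma, vma->vm_pgoff, global_huge);
	}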
diff --git a/mm/shmem.c b/mm/shmem.c
index 7889b499d33fa09c1692eb01bb9deb95bdb48e54..2a86b0d9f5164c60c14ea86a3ce0087a7e134f8f 100644
@@ -548,9 +548,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
-                           bool shmem_huge_force, struct mm_struct *mm,
-                           unsigned long vm_flags)
+static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                                       bool shmem_huge_force, struct mm_struct *mm,
+                                       unsigned long vm_flags)
 {
        loff_t i_size;
 
@@ -581,14 +581,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
        }
 }
 
-bool shmem_is_huge(struct inode *inode, pgoff_t index,
+bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                   bool shmem_huge_force, struct mm_struct *mm,
                   unsigned long vm_flags)
 {
        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                return false;
 
-       return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
+       return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
+                                          mm, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
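
The wrapper/helper split is unchanged by the rename; restated with comments
on what each half does (same logic as the hunk above):

bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
			       bool shmem_huge_force, struct mm_struct *mm,
			       unsigned long vm_flags)
{
	/*
	 * A PMD-order folio larger than what the page cache supports
	 * could never be inserted, so report huge as disabled before
	 * consulting any policy.
	 */
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;

	/* Policy proper: shmem_enabled sysfs knob, mount options, VM flags. */
	return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
					   mm, vm_flags);
}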
@@ -1156,7 +1157,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
                        STATX_ATTR_NODUMP);
        generic_fillattr(idmap, request_mask, inode, stat);
 
-       if (shmem_is_huge(inode, 0, false, NULL, 0))
+       if (shmem_huge_global_enabled(inode, 0, false, NULL, 0))
                stat->blksize = HPAGE_PMD_SIZE;
 
        if (request_mask & STATX_BTIME) {
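
The shmem_getattr() call site above is the user-visible side of the global
flag: when it reports true, stat() on a tmpfs file returns a PMD-sized
st_blksize. A small standalone check (illustrative userspace code; the size
printed is architecture-dependent, e.g. 2 MiB on x86-64):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return 1;

	/* On tmpfs with huge pages globally enabled, this prints
	 * HPAGE_PMD_SIZE (2097152 on x86-64) instead of PAGE_SIZE. */
	printf("st_blksize = %ld\n", (long)st.st_blksize);
	return 0;
}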
@@ -2149,7 +2150,7 @@ repeat:
                return 0;
        }
 
-       huge = shmem_is_huge(inode, index, false, fault_mm,
+       huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
                             vma ? vma->vm_flags : 0);
        /* Find hugepage orders that are allowed for anonymous shmem. */
        if (vma && vma_is_anon_shmem(vma))