mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
author     Baolin Wang <baolin.wang@linux.alibaba.com>
Thu, 28 Nov 2024 07:40:40 +0000 (15:40 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:36 +0000 (22:40 -0800)
Change shmem_huge_global_enabled() to return a bitmap of the suitable huge
orders, and return 0 if huge pages are not allowed.  This is a preparation
for supporting allocation of various huge orders for tmpfs in the following
patches.

No functional changes.
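
For illustration only, a minimal sketch (not part of this patch) of how a
caller can consume the new return value: the global policy now hands back a
bitmap of allowed orders (currently just BIT(HPAGE_PMD_ORDER) or 0), which
can be masked against the caller's candidate orders instead of being tested
as a boolean.  The wrapper below and its name are hypothetical; only
shmem_huge_global_enabled() and its arguments come from this patch:

	/* Hypothetical helper, would live in mm/shmem.c next to the callee. */
	static unsigned long example_filter_orders(struct inode *inode, pgoff_t index,
						   loff_t write_end, bool huge_force,
						   unsigned long vm_flags,
						   unsigned long candidate_orders)
	{
		/* Bitmap of orders the global huge policy allows (0 == none). */
		unsigned int global_orders;

		global_orders = shmem_huge_global_enabled(inode, index, write_end,
							  huge_force, vm_flags);
		if (!global_orders)
			return 0;

		/* Keep only the candidate orders the global policy permits. */
		return candidate_orders & global_orders;
	}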

Link: https://lkml.kernel.org/r/9dce1cfad3e9c1587cf1a0ea782ddbebd0e92984.1732779148.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/shmem.c b/mm/shmem.c
index ac58d4fb2e6f5a617c85cd49de99e3c5f97209e6..bea9036028e76827b16622753acf0e1d3d530088 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -554,37 +554,37 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                     loff_t write_end, bool shmem_huge_force,
-                                     unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                                             loff_t write_end, bool shmem_huge_force,
+                                             unsigned long vm_flags)
 {
        loff_t i_size;
 
        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-               return false;
+               return 0;
        if (!S_ISREG(inode->i_mode))
-               return false;
+               return 0;
        if (shmem_huge == SHMEM_HUGE_DENY)
-               return false;
+               return 0;
        if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
-               return true;
+               return BIT(HPAGE_PMD_ORDER);
 
        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
-               return true;
+               return BIT(HPAGE_PMD_ORDER);
        case SHMEM_HUGE_WITHIN_SIZE:
                index = round_up(index + 1, HPAGE_PMD_NR);
                i_size = max(write_end, i_size_read(inode));
                i_size = round_up(i_size, PAGE_SIZE);
                if (i_size >> PAGE_SHIFT >= index)
-                       return true;
+                       return BIT(HPAGE_PMD_ORDER);
                fallthrough;
        case SHMEM_HUGE_ADVISE:
                if (vm_flags & VM_HUGEPAGE)
-                       return true;
+                       return BIT(HPAGE_PMD_ORDER);
                fallthrough;
        default:
-               return false;
+               return 0;
        }
 }
 
@@ -779,11 +779,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
        return 0;
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                     loff_t write_end, bool shmem_huge_force,
-                                     unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                                             loff_t write_end, bool shmem_huge_force,
+                                             unsigned long vm_flags)
 {
-       return false;
+       return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -1690,21 +1690,21 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
        unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
        unsigned long vm_flags = vma ? vma->vm_flags : 0;
        pgoff_t aligned_index;
-       bool global_huge;
+       unsigned int global_orders;
        loff_t i_size;
        int order;
 
        if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
                return 0;
 
-       global_huge = shmem_huge_global_enabled(inode, index, write_end,
-                                               shmem_huge_force, vm_flags);
+       global_orders = shmem_huge_global_enabled(inode, index, write_end,
+                                                 shmem_huge_force, vm_flags);
        if (!vma || !vma_is_anon_shmem(vma)) {
                /*
                 * For tmpfs, we now only support PMD sized THP if huge page
                 * is enabled, otherwise fallback to order 0.
                 */
-               return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+               return global_orders;
        }
 
        /*
@@ -1737,7 +1737,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
        if (vm_flags & VM_HUGEPAGE)
                mask |= READ_ONCE(huge_shmem_orders_madvise);
 
-       if (global_huge)
+       if (global_orders > 0)
                mask |= READ_ONCE(huge_shmem_orders_inherit);
 
        return THP_ORDERS_ALL_FILE_DEFAULT & mask;