mm: shmem: add mTHP size alignment in shmem_get_unmapped_area
author: Baolin Wang <baolin.wang@linux.alibaba.com>
Tue, 11 Jun 2024 10:11:09 +0000 (18:11 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 4 Jul 2024 02:30:04 +0000 (19:30 -0700)
Although the top-level hugepage allocation can be turned off, anonymous
shmem can still use mTHP by configuring the sysfs interface located at
'/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/shmem_enabled'.
Therefore, add alignment for mTHP size to provide a suitable alignment
address in shmem_get_unmapped_area().

Link: https://lkml.kernel.org/r/0c549b57cf7db07503af692d8546ecfad0fcce52.1718090413.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Lance Yang <ioworker0@gmail.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 8225b2f230ebb6a0bd93801df96d0fe0500e61ba..224162aa204bcdc6b397b0eecdf890b624e1bd01 100644 (file)
@@ -2393,6 +2393,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        unsigned long inflated_len;
        unsigned long inflated_addr;
        unsigned long inflated_offset;
+       unsigned long hpage_size;
 
        if (len > TASK_SIZE)
                return -ENOMEM;
@@ -2411,8 +2412,6 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 
        if (shmem_huge == SHMEM_HUGE_DENY)
                return addr;
-       if (len < HPAGE_PMD_SIZE)
-               return addr;
        if (flags & MAP_FIXED)
                return addr;
        /*
@@ -2424,8 +2423,11 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (uaddr == addr)
                return addr;
 
+       hpage_size = HPAGE_PMD_SIZE;
        if (shmem_huge != SHMEM_HUGE_FORCE) {
                struct super_block *sb;
+               unsigned long __maybe_unused hpage_orders;
+               int order = 0;
 
                if (file) {
                        VM_BUG_ON(file->f_op != &shmem_file_operations);
@@ -2438,18 +2440,38 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                        if (IS_ERR(shm_mnt))
                                return addr;
                        sb = shm_mnt->mnt_sb;
+
+                       /*
+                        * Find the highest mTHP order used for anonymous shmem to
+                        * provide a suitable alignment address.
+                        */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                       hpage_orders = READ_ONCE(huge_shmem_orders_always);
+                       hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
+                       hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
+                       if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
+                               hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
+
+                       if (hpage_orders > 0) {
+                               order = highest_order(hpage_orders);
+                               hpage_size = PAGE_SIZE << order;
+                       }
+#endif
                }
-               if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
+               if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
                        return addr;
        }
 
-       offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
-       if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
+       if (len < hpage_size)
+               return addr;
+
+       offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
+       if (offset && offset + len < 2 * hpage_size)
                return addr;
-       if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
+       if ((addr & (hpage_size - 1)) == offset)
                return addr;
 
-       inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
+       inflated_len = len + hpage_size - PAGE_SIZE;
        if (inflated_len > TASK_SIZE)
                return addr;
        if (inflated_len < len)
@@ -2462,10 +2484,10 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (inflated_addr & ~PAGE_MASK)
                return addr;
 
-       inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
+       inflated_offset = inflated_addr & (hpage_size - 1);
        inflated_addr += offset - inflated_offset;
        if (inflated_offset > offset)
-               inflated_addr += HPAGE_PMD_SIZE;
+               inflated_addr += hpage_size;
 
        if (inflated_addr > TASK_SIZE - len)
                return addr;