mm: introduce page_size()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 23 Sep 2019 22:34:25 +0000 (15:34 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 24 Sep 2019 22:54:08 +0000 (15:54 -0700)
Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate
places to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Add a page_size() helper and replace the open-coded
'PAGE_SIZE << compound_order(page)' pattern with page_size(page).
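
As an illustration, a call site that open-codes the size calculation
(the 'len' local is hypothetical, not taken from any file in this patch):

	len = PAGE_SIZE << compound_order(page);	/* before */
	len = page_size(page);				/* after */

where page_size() is the helper added to include/linux/mm.h:

	/* Returns the number of bytes in this potentially compound page. */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}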

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
17 files changed:
arch/arm/mm/flush.c
arch/arm64/mm/flush.c
arch/ia64/mm/init.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/staging/android/ion/ion_system_heap.c
drivers/target/tcm_fc/tfc_io.c
fs/io_uring.c
include/linux/hugetlb.h
include/linux/mm.h
lib/iov_iter.c
mm/kasan/common.c
mm/nommu.c
mm/page_vma_mapped.c
mm/rmap.c
mm/slob.c
mm/slub.c
net/xdp/xsk.c

index 6ecbda87ee4683f0f6252636fd5aa81af74cf8d4..4c7ebe094a83c27fc68e26fa6eeb18d0b8d85a80 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * coherent with the kernels mapping.
         */
        if (!PageHighMem(page)) {
-               size_t page_size = PAGE_SIZE << compound_order(page);
-               __cpuc_flush_dcache_area(page_address(page), page_size);
+               __cpuc_flush_dcache_area(page_address(page), page_size(page));
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
index dc19300309d2880e4ad74c4baaaa40e3b5f619d8..ac485163a4a7669f0e6d1b1eca683c65ad8c59ec 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
        struct page *page = pte_page(pte);
 
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-               sync_icache_aliases(page_address(page),
-                                   PAGE_SIZE << compound_order(page));
+               sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
index 678b98a09c854c78cb2ac3e69a7a2f9261e2a890..bf9df2625bc8393c92e427311f29b8b7cc5ce728 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */
 
-       flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+       flush_icache_range(addr, addr + page_size(page));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
 
index c70cb5f272cf05a8c7ab962976e9bc324ea57c91..0891ab829b1b6b1318353e945e6394fab29f7c1e 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ new_buf:
                        bool merge;
 
                        if (page)
-                               pg_size <<= compound_order(page);
+                               pg_size = page_size(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
                                                           __GFP_NORETRY,
                                                           order);
                                        if (page)
-                                               pg_size <<=
-                                                       compound_order(page);
+                                               pg_size <<= order;
                                }
                                if (!page) {
                                        page = alloc_page(gfp);
index aa8d8425be25e29b53a31b1e5f95c51e036622f4..b83a1d16bd8983a3620ee526cbde3de2e84592cb 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
-               size_remaining -= PAGE_SIZE << compound_order(page);
+               size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-               sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+               sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }
index a254792d882cc1541578436f5588bfeda59ccf15..1354a157e9afcfc6d743649bbe170b3436e7253c 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
                                           page, off_in_page, tlen);
                        fr_len(fp) += tlen;
                        fp_skb(fp)->data_len += tlen;
-                       fp_skb(fp)->truesize +=
-                                       PAGE_SIZE << compound_order(page);
+                       fp_skb(fp)->truesize += page_size(page);
                } else {
                        BUG_ON(!page);
                        from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
index 0dadbdbead0fbfef8b0f2373756b3254832ada53..f83de4c6a826e1d146dc389517069efcd2e02b4d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
        }
 
        page = virt_to_head_page(ptr);
-       if (sz > (PAGE_SIZE << compound_order(page)))
+       if (sz > page_size(page))
                return -EINVAL;
 
        pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
index edfca427831928abc6a072c4e0d94daee6d7811a..53fc34f930d08cd8edcfb95834e882d713de49fe 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHuge(page), page);
-       return size_to_hstate(PAGE_SIZE << compound_order(page));
+       return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
index 6e79b3df1582c30e13e6266a7a1ac4b66f82f324..d46d5585e2a2236f05718166e5c428bbfc0d6279 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
        page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+       return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
index f1e0569b4539b8b8e976f6aff26b79f016f2edfd..639d5e7014c1e5a3f29a1f530233c23f455df993 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;
 
-       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+       if (likely(n <= v && v <= page_size(head)))
                return true;
        WARN_ON(1);
        return false;
index 6b6f1198c72b3247bffbe8db3ea5c1f09e51b007..307631d9c62b80310922258855d04a716f07124c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page)
 
        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
-       kasan_poison_shadow(page_address(page),
-                       PAGE_SIZE << compound_order(page),
+       kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
 }
 
@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
-       redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+       redzone_end = (unsigned long)ptr + page_size(page);
 
        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
-               kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-                               KASAN_FREE_PAGE);
+               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
index fed1b6e9c89b4361c68100fec1bfb3e693dc9918..99b7ec318824c8689d22d4f005016fd61e3b8de8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
-       return PAGE_SIZE << compound_order(page);
+       return page_size(page);
 }
 
 /**
index 11df03e71288c3fe0b78e164eca835ac4332e5ca..eff4b4520c8d5c7603efb2a5f77f3ec3d8d79a3e 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
-               pvmw->pte = huge_pte_offset(mm, pvmw->address,
-                                           PAGE_SIZE << compound_order(page));
+               pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;
 
index 31352bba197dc2170956102da8da9afe52ff228f..f401732b20e85951367d11451801628fc65622ba 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                0, vma, vma->vm_mm, address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        mmu_notifier_invalidate_range_start(&range);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        if (PageHuge(page)) {
                /*
                 * If sharing is possible, start and end will be adjusted
index 7f421d0ca9abbcd3a17ca467f1221465537d982e..cf377beab96212bc8e717eaabfac6b263b9108cf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
        sp = virt_to_page(block);
        if (unlikely(!PageSlab(sp)))
-               return PAGE_SIZE << compound_order(sp);
+               return page_size(sp);
 
        align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        m = (unsigned int *)(block - align);
index 17fe1cac11fb1b772a4f0417dbe0b423af0461d6..42c1b3af3c9805fd6ae0e7028aa614fac979f433 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                return 1;
 
        start = page_address(page);
-       length = PAGE_SIZE << compound_order(page);
+       length = page_size(page);
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
        if (!(s->flags & SLAB_POISON))
                return;
 
        metadata_access_enable();
-       memset(addr, POISON_INUSE, PAGE_SIZE << order);
+       memset(addr, POISON_INUSE, page_size(page));
        metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-                       void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        struct kmem_cache_order_objects oo = s->oo;
        gfp_t alloc_gfp;
        void *start, *p, *next;
-       int idx, order;
+       int idx;
        bool shuffle;
 
        flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        page->objects = oo_objects(oo);
 
-       order = compound_order(page);
        page->slab_cache = s;
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        start = page_address(page);
 
-       setup_page_debug(s, start, order);
+       setup_page_debug(s, page, start);
 
        shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 
        if (unlikely(!PageSlab(page))) {
                WARN_ON(!PageCompound(page));
-               return PAGE_SIZE << compound_order(page);
+               return page_size(page);
        }
 
        return slab_ksize(page->slab_cache);
index c2f1af3b6a7c4ec2aed2beab304e0692fb462535..fa8fbb8fa3c823aff9cb06d25f4335b662cc93c2 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
-       if (size > (PAGE_SIZE << compound_order(qpg)))
+       if (size > page_size(qpg))
                return -EINVAL;
 
        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;