mm: introduce compound_nr()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 23 Sep 2019 22:34:30 +0000 (15:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Sep 2019 22:54:08 +0000 (15:54 -0700)
Replace 1 << compound_order(page) with compound_nr(page).  Minor
improvements in readability.
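
For illustration, the shape of the conversion at a typical call site (a
hypothetical snippet, not part of the diff below):

	/* Before: each caller open-codes the shift. */
	unsigned long nr = 1UL << compound_order(page);

	/* After: the helper added to include/linux/mm.h takes care of it. */
	unsigned long nr = compound_nr(page);

Besides reading better, the helper always computes the count as an
unsigned long, where the open-coded sites mixed 1, 1U and 1UL shifts.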

Link: http://lkml.kernel.org/r/20190721104612.19120-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
18 files changed:
arch/arm/mm/flush.c
arch/powerpc/mm/hugetlbpage.c
fs/proc/task_mmu.c
include/linux/mm.h
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/hugetlb_cgroup.c
mm/kasan/common.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/migrate.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/swap_state.c
mm/util.c
mm/vmscan.c

index 4c7ebe094a83c27fc68e26fa6eeb18d0b8d85a80..6d89db7895d14c85743c09e2c60d92f5410f3fbf 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -208,13 +208,13 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
index a8953f10880897eb62352b6b6734db312d902739..73d4873fc7f85442eafc08399446bc176fd82b9a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)
 
        BUG_ON(!PageCompound(page));
 
-       for (i = 0; i < (1UL << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
index bf43d1d600592680a8ce82635dab2792d9990bd4..ea1630465474e698d7e41d6804e1124e02b44069 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -461,7 +461,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked)
 {
-       int i, nr = compound ? 1 << compound_order(page) : 1;
+       int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
        /*
index 9238548bdec5534f52f096bc2e0abfe58fa7cee3..69b7314c8d24f242aa3a118315b133d1be5d83d7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
        page[1].compound_order = order;
 }
 
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+       return 1UL << compound_order(page);
+}
+
 /* Returns the number of bytes in this potentially compound page. */
 static inline unsigned long page_size(struct page *page)
 {
index 952dc2fb24e50a26bee9621965ec6070b5d13346..777c088e911398a628b9562b7e5dcf38729689e0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         * is safe to read and it's 0 for tail pages.
                         */
                        if (unlikely(PageCompound(page))) {
-                               low_pfn += (1UL << compound_order(page)) - 1;
+                               low_pfn += compound_nr(page) - 1;
                                goto isolate_fail;
                        }
                }
index 40667c2f338372e229ec7f51f6eb165d02c858a7..5f30aedd7363f761ba31547ff1773307b6832caa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -126,7 +126,7 @@ static void page_cache_delete(struct address_space *mapping,
        /* hugetlb pages are represented by a single entry in the xarray */
        if (!PageHuge(page)) {
                xas_set_order(&xas, page->index, compound_order(page));
-               nr = 1U << compound_order(page);
+               nr = compound_nr(page);
        }
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
index 98f13ab37bacc1d206760e6e6ab554a2536ff7a7..84a36d80dd2ed33ce695e31aa3a4bc68dd2401fc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1460,7 +1460,7 @@ check_again:
                 * gup may start from a tail page. Advance step by the left
                 * part.
                 */
-               step = (1 << compound_order(head)) - (pages[i] - head);
+               step = compound_nr(head) - (pages[i] - head);
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
index 68c2f2f3c05b76203fae22ad8cf727482c95216e..f1930fa0b445dae721d9be0139e2b2ba7119ec97 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -139,7 +139,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
        if (!page_hcg || page_hcg != h_cg)
                goto out;
 
-       nr_pages = 1 << compound_order(page);
+       nr_pages = compound_nr(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
index 307631d9c62b80310922258855d04a716f07124c..6814d6d6a023d8e00af10954bf5252ceea9fad10 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -336,7 +336,7 @@ void kasan_poison_slab(struct page *page)
 {
        unsigned long i;
 
-       for (i = 0; i < (1 << compound_order(page)); i++)
+       for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
index f3c15bb07cce4be6dc9eb6143da2625828c56c4a..6c6032c03d1de7f2397176c21a9652b6a2e7071c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6511,7 +6511,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                unsigned int nr_pages = 1;
 
                if (PageTransHuge(page)) {
-                       nr_pages <<= compound_order(page);
+                       nr_pages = compound_nr(page);
                        ug->nr_huge += nr_pages;
                }
                if (PageAnon(page))
@@ -6523,7 +6523,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                }
                ug->pgpgout++;
        } else {
-               ug->nr_kmem += 1 << compound_order(page);
+               ug->nr_kmem += compound_nr(page);
                __ClearPageKmemcg(page);
        }
 
index c73f0991316511fb5471d3382f2d719c6cbfd759..5f2c83ce9fdea3ccfbeb9f79be29fcfcee59e1f9 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1309,7 +1309,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                head = compound_head(page);
                if (page_huge_active(head))
                        return pfn;
-               skip = (1 << compound_order(head)) - (page - head);
+               skip = compound_nr(head) - (page - head);
                pfn += skip - 1;
        }
        return 0;
@@ -1347,7 +1347,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
                if (PageHuge(page)) {
                        struct page *head = compound_head(page);
-                       pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+                       pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_huge_page(head, &source);
                        continue;
                } else if (PageTransHuge(page))
index 9f4ed4e985c1fe3468df8d2f55e803a6a41bb44a..aa72b49e020915de787dbab810262fe64032bb12 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1892,7 +1892,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
        /* Avoid migrating to a node that is nearly full */
-       if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+       if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
                return 0;
 
        if (isolate_lru_page(page))
index ff5484fdbdf9908a9064129f2990a13f4cfc2247..df566c0f67297c64f8edcbb786e2aa348f453465 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8196,7 +8196,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        if (!hugepage_migration_supported(page_hstate(head)))
                                goto unmovable;
 
-                       skip_pages = (1 << compound_order(head)) - (page - head);
+                       skip_pages = compound_nr(head) - (page - head);
                        iter += skip_pages - 1;
                        continue;
                }
index f401732b20e85951367d11451801628fc65622ba..26006445c8b520c75d3a5acf21da27132453de15 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1520,8 +1520,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (PageHuge(page)) {
-                               int nr = 1 << compound_order(page);
-                               hugetlb_count_sub(nr, mm);
+                               hugetlb_count_sub(compound_nr(page), mm);
                                set_huge_swap_pte_at(mm, address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
index 0f7fd4a85db6e5f3497d21c1d8d512cf121c62e1..15d26c86e5ef222e0ce2454c748eca4745b49b28 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
-       unsigned long nr = 1UL << compound_order(page);
+       unsigned long nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -1884,7 +1884,7 @@ alloc_nohuge:
        lru_cache_add_anon(page);
 
        spin_lock_irq(&info->lock);
-       info->alloced += 1 << compound_order(page);
+       info->alloced += compound_nr(page);
        inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ clear:
                struct page *head = compound_head(page);
                int i;
 
-               for (i = 0; i < (1 << compound_order(head)); i++) {
+               for (i = 0; i < compound_nr(head); i++) {
                        clear_highpage(head + i);
                        flush_dcache_page(head + i);
                }
@@ -1952,7 +1952,7 @@ clear:
         * Error recovery.
         */
 unacct:
-       shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+       shmem_inode_unacct_blocks(inode, compound_nr(page));
 
        if (PageTransHuge(page)) {
                unlock_page(page);
index 8368621a0fc70cfc3743ac36b04b249aaa4c3d09..f844af5f09ba58bd1b599af7e85bf6b8113102f1 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = 1UL << compound_order(page);
+       unsigned long i, nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
index e6351a80f24885edd910c9a997ef6f4acbaceeef..bab284d69c8cd52234149904112977fb5f66357e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -521,7 +521,7 @@ bool page_mapped(struct page *page)
                return true;
        if (PageHuge(page))
                return false;
-       for (i = 0; i < (1 << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
index a6c5d0b28321c383037aab176bfbfca35755a320..8e03427cb64f6eebc20c4b24fd597e33a938c175 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1149,7 +1149,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                VM_BUG_ON_PAGE(PageActive(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
 
                /* Account the number of base pages even though THP */
                sc->nr_scanned += nr_pages;
@@ -1705,7 +1705,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
                total_scan += nr_pages;
 
                if (page_zonenum(page) > sc->reclaim_idx) {