hugetlb: remove a few calls to page_folio()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 24 Aug 2023 14:13:24 +0000 (15:13 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Oct 2023 17:32:31 +0000 (10:32 -0700)
Anything found on a linked list threaded through ->lru is guaranteed to be
a folio, because the compound_head field of a tail page overlaps the ->lru
member of struct page: a tail page cannot be threaded onto such a list
without destroying its compound_head.  So we can pull folios directly off
these lists, no matter whether pages or folios were added to them.
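
For illustration, here is a minimal userspace sketch of the layout
argument.  The names mirror the kernel's struct page and struct folio,
but the definitions are heavily simplified, so treat this as a model of
the overlap rather than the kernel's actual declarations:

	#include <assert.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	struct page {
		unsigned long flags;
		union {
			struct list_head lru;         /* head page: list linkage */
			unsigned long compound_head;  /* tail page: head address | 1 */
		};
	};

	struct folio {
		union {
			struct {
				unsigned long flags;
				struct list_head lru;
			};
			struct page page;
		};
	};

	int main(void)
	{
		/* ->lru and ->compound_head share storage, and a tail page
		 * always has bit 0 of compound_head set, while valid list
		 * pointers are aligned (bit 0 clear).  So a page linked
		 * through ->lru cannot be a tail page. */
		assert(offsetof(struct page, lru) ==
		       offsetof(struct page, compound_head));

		/* A head page and its folio occupy the same memory, with
		 * ->lru at the same offset, so list_entry() can produce a
		 * struct folio directly from the list linkage. */
		assert(offsetof(struct folio, lru) ==
		       offsetof(struct page, lru));
		return 0;
	}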

Link: https://lkml.kernel.org/r/20230824141325.2704553-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7a6fe4f137779feda523a5aa842c32191469a86d..e91ce966a2c9130ce5fb9f1a05d819f9e31d2529 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1836,11 +1836,9 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
-       struct page *page, *t_page;
-       struct folio *folio;
+       struct folio *folio, *t_folio;
 
-       list_for_each_entry_safe(page, t_page, list, lru) {
-               folio = page_folio(page);
+       list_for_each_entry_safe(folio, t_folio, list, lru) {
                update_and_free_hugetlb_folio(h, folio, false);
                cond_resched();
        }
@@ -2229,8 +2227,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
                                                 bool acct_surplus)
 {
        int nr_nodes, node;
-       struct page *page = NULL;
-       struct folio *folio;
+       struct folio *folio = NULL;
 
        lockdep_assert_held(&hugetlb_lock);
        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
@@ -2240,15 +2237,14 @@ static struct page *remove_pool_huge_page(struct hstate *h,
                 */
                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
                    !list_empty(&h->hugepage_freelists[node])) {
-                       page = list_entry(h->hugepage_freelists[node].next,
-                                         struct page, lru);
-                       folio = page_folio(page);
+                       folio = list_entry(h->hugepage_freelists[node].next,
+                                         struct folio, lru);
                        remove_hugetlb_folio(h, folio, acct_surplus);
                        break;
                }
        }
 
-       return page;
+       return &folio->page;
 }
 
 /*
@@ -3414,15 +3410,15 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
         * Collect pages to be freed on a list, and free after dropping lock
         */
        for_each_node_mask(i, *nodes_allowed) {
-               struct page *page, *next;
+               struct folio *folio, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
-               list_for_each_entry_safe(page, next, freel, lru) {
+               list_for_each_entry_safe(folio, next, freel, lru) {
                        if (count >= h->nr_huge_pages)
                                goto out;
-                       if (PageHighMem(page))
+                       if (folio_test_highmem(folio))
                                continue;
-                       remove_hugetlb_folio(h, page_folio(page), false);
-                       list_add(&page->lru, &page_list);
+                       remove_hugetlb_folio(h, folio, false);
+                       list_add(&folio->lru, &page_list);
                }
        }
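
One subtlety in the remove_pool_huge_page() hunk above: the function still
returns struct page *, and its not-found path now relies on &folio->page
evaluating to NULL when folio is NULL.  That works because struct page sits
at offset 0 of struct folio, so taking the member's address adds nothing to
the pointer.  A sketch with simplified stand-in types (not the kernel's
definitions):

	#include <stdio.h>

	struct page { unsigned long flags; };
	struct folio { struct page page; };  /* page at offset 0 */

	int main(void)
	{
		struct folio *folio = NULL;  /* the "no page found" path */
		/* &folio->page adds offsetof(struct folio, page) == 0, so
		 * the result is still NULL.  (Strict ISO C calls member
		 * access through a null pointer undefined; the kernel
		 * nonetheless relies on this long-standing idiom.) */
		struct page *page = &folio->page;
		printf("%s\n", page == NULL ? "NULL" : "non-NULL");
		return 0;
	}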