mm/hugetlb: convert dissolve_free_huge_pages() to folios
author	Sidhartha Kumar <sidhartha.kumar@oracle.com>
Thu, 11 Apr 2024 16:47:56 +0000 (09:47 -0700)
committer	Andrew Morton <akpm@linux-foundation.org>
Mon, 6 May 2024 00:53:35 +0000 (17:53 -0700)
This allows us to rename dissolve_free_huge_page() to
dissolve_free_hugetlb_folio().  Convert one caller to pass in a folio
directly, and use page_folio() to convert the remaining caller in
mm/memory-failure.c.
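
As a minimal sketch of the new calling convention (the example_* helpers
below are made up for illustration; only dissolve_free_hugetlb_folio(),
page_folio() and pfn_folio() are part of this change):

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	/* Pfn walkers can hand the function a folio directly. */
	static int example_dissolve_range(unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned int order)
	{
		unsigned long pfn;
		int rc = 0;

		for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
			rc = dissolve_free_hugetlb_folio(pfn_folio(pfn));
			if (rc)
				break;
		}
		return rc;
	}

	/* Callers that still hold a struct page convert at the call site. */
	static int example_dissolve_page(struct page *page)
	{
		return dissolve_free_hugetlb_folio(page_folio(page));
	}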

[sidhartha.kumar@oracle.com: remove unneeded `extern']
Link: https://lkml.kernel.org/r/71760ed4-e80d-493a-95ea-2545414b1aba@oracle.com
[sidhartha.kumar@oracle.com: v2]
Link: https://lkml.kernel.org/r/20240412182139.120871-1-sidhartha.kumar@oracle.com
Link: https://lkml.kernel.org/r/20240411164756.261178-1-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory-failure.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f3e628802792aaf53e0fbb57e2515a70437e78e..8968e8a3a205dc3ac77a8f20ff43e0960a30a2dc 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -861,7 +861,7 @@ static inline int hstate_index(struct hstate *h)
        return h - hstates;
 }
 
-extern int dissolve_free_huge_page(struct page *page);
+int dissolve_free_hugetlb_folio(struct folio *folio);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);
 
@@ -1148,7 +1148,7 @@ static inline int hstate_index(struct hstate *h)
        return 0;
 }
 
-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
 {
        return 0;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d74289d3f30db5f2890ff1f9cd48cf085349f901..40bcaf2bad5596b236ee8a6d0003407d448866d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2377,8 +2377,8 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use hugepages and non-hugepages.
+ * Dissolve a given free hugetlb folio into free buddy pages. This function
+ * does nothing for in-use hugetlb folios and non-hugetlb folios.
  * This function returns values like below:
  *
  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
@@ -2390,10 +2390,9 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
  *       0:  successfully dissolved free hugepages or the page is not a
  *           hugepage (considered as already dissolved)
  */
-int dissolve_free_huge_page(struct page *page)
+int dissolve_free_hugetlb_folio(struct folio *folio)
 {
        int rc = -EBUSY;
-       struct folio *folio = page_folio(page);
 
 retry:
        /* Not to disrupt normal path by vainly holding hugetlb_lock */
@@ -2470,13 +2469,13 @@ out:
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
- * Also note that if dissolve_free_huge_page() returns with an error, all
- * free hugepages that were dissolved before that error are lost.
+ * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
+ * free hugetlb folios that were dissolved before that error are lost.
  */
 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
-       struct page *page;
+       struct folio *folio;
        int rc = 0;
        unsigned int order;
        struct hstate *h;
@@ -2489,8 +2488,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
                order = min(order, huge_page_order(h));
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
-               page = pfn_to_page(pfn);
-               rc = dissolve_free_huge_page(page);
+               folio = pfn_folio(pfn);
+               rc = dissolve_free_hugetlb_folio(folio);
                if (rc)
                        break;
        }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index db8d770ce80122fb34481b7b2d66dded21f3a29c..68e1fe1c0b724c358e16a31ec57c463f6b68cad1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -155,7 +155,7 @@ static int __page_handle_poison(struct page *page)
 
        /*
         * zone_pcp_disable() can't be used here. It will
-        * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
+        * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold
         * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
         * optimization is enabled. This will break current lock dependency
         * chain and leads to deadlock.
@@ -165,7 +165,7 @@ static int __page_handle_poison(struct page *page)
         * but nothing guarantees that those pages do not get back to a PCP
         * queue if we need to refill those.
         */
-       ret = dissolve_free_huge_page(page);
+       ret = dissolve_free_hugetlb_folio(page_folio(page));
        if (!ret) {
                drain_all_pages(page_zone(page));
                ret = take_page_off_buddy(page);
@@ -178,8 +178,8 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
 {
        if (hugepage_or_freepage) {
                /*
-                * Doing this check for free pages is also fine since dissolve_free_huge_page
-                * returns 0 for non-hugetlb pages as well.
+                * Doing this check for free pages is also fine since
+                * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well.
                 */
                if (__page_handle_poison(page) <= 0)
                        /*