mm: memory-failure: make put_ref_page() more useful
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Fri, 21 Oct 2022 08:46:09 +0000 (16:46 +0800)
Committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 9 Nov 2022 01:37:20 +0000 (17:37 -0800)
Pass pfn/flags to put_ref_page() so it can check MF_COUNT_INCREASED and drop
the extra refcount itself, which makes the callers cleaner.

Link: https://lkml.kernel.org/r/20221021084611.53765-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory-failure.c

index 145bb561ddb3ad2b8a0b723a4fec4fc2ef487254..8c6a19b9790f9de63da5947cf65658509a560bbd 100644 (file)
@@ -1910,17 +1910,25 @@ static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
+/* Drop the extra refcount in case we come from madvise() */
+static void put_ref_page(unsigned long pfn, int flags)
+{
+       struct page *page;
+
+       if (!(flags & MF_COUNT_INCREASED))
+               return;
+
+       page = pfn_to_page(pfn);
+       if (page)
+               put_page(page);
+}
+
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
                struct dev_pagemap *pgmap)
 {
-       struct page *page = pfn_to_page(pfn);
        int rc = -ENXIO;
 
-       if (flags & MF_COUNT_INCREASED)
-               /*
-                * Drop the extra refcount in case we come from madvise().
-                */
-               put_page(page);
+       put_ref_page(pfn, flags);
 
        /* device metadata space is not recoverable */
        if (!pgmap_pfn_valid(pgmap, pfn))
@@ -2513,12 +2521,6 @@ static int soft_offline_in_use_page(struct page *page)
        return ret;
 }
 
-static void put_ref_page(struct page *page)
-{
-       if (page)
-               put_page(page);
-}
-
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
@@ -2547,19 +2549,17 @@ int soft_offline_page(unsigned long pfn, int flags)
 {
        int ret;
        bool try_again = true;
-       struct page *page, *ref_page = NULL;
+       struct page *page;
 
        WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
        if (!pfn_valid(pfn))
                return -ENXIO;
-       if (flags & MF_COUNT_INCREASED)
-               ref_page = pfn_to_page(pfn);
 
        /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
        page = pfn_to_online_page(pfn);
        if (!page) {
-               put_ref_page(ref_page);
+               put_ref_page(pfn, flags);
                return -EIO;
        }
 
@@ -2567,7 +2567,7 @@ int soft_offline_page(unsigned long pfn, int flags)
 
        if (PageHWPoison(page)) {
                pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-               put_ref_page(ref_page);
+               put_ref_page(pfn, flags);
                mutex_unlock(&mf_mutex);
                return 0;
        }