mm: return a folio from read_swap_cache_async()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 13 Dec 2023 21:58:41 +0000 (21:58 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:32 +0000 (11:58 -0800)
The only two callers simply call put_page() on the page returned, so
they're happier calling folio_put().  Saves two calls to compound_head().
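
As a rough illustration of where the saving comes from (a simplified
sketch, not the exact include/linux/mm.h definitions): put_page() must
first resolve its argument to a folio, which is a compound_head() lookup,
while folio_put() already has the folio in hand:

	/* simplified sketch; the real put_page() also has devmap handling */
	static inline void put_page(struct page *page)
	{
		/* page_folio() boils down to compound_head() */
		struct folio *folio = page_folio(page);

		folio_put(folio);
	}

	static inline void folio_put(struct folio *folio)
	{
		/* no head-page lookup needed here */
		if (folio_put_testzero(folio))
			__folio_put(folio);
	}

With read_swap_cache_async() handing back the folio itself, each of the
two call sites below drops one such lookup.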

Link: https://lkml.kernel.org/r/20231213215842.671461-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/madvise.c
mm/swap.h
mm/swap_state.c

index 6214a1ab5654f442c3ad5ae9c39678f703a303fd..912155a94ed5871c1805f33ec624c7c7c1ee28c8 100644 (file)
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                pte_t pte;
                swp_entry_t entry;
-               struct page *page;
+               struct folio *folio;
 
                if (!ptep++) {
                        ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
                pte_unmap_unlock(ptep, ptl);
                ptep = NULL;
 
-               page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+               folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                             vma, addr, &splug);
-               if (page)
-                       put_page(page);
+               if (folio)
+                       folio_put(folio);
        }
 
        if (ptep)
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 {
        XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
        pgoff_t end_index = linear_page_index(vma, end) - 1;
-       struct page *page;
+       struct folio *folio;
        struct swap_iocb *splug = NULL;
 
        rcu_read_lock();
-       xas_for_each(&xas, page, end_index) {
+       xas_for_each(&xas, folio, end_index) {
                unsigned long addr;
                swp_entry_t entry;
 
-               if (!xa_is_value(page))
+               if (!xa_is_value(folio))
                        continue;
-               entry = radix_to_swp_entry(page);
+               entry = radix_to_swp_entry(folio);
                /* There might be swapin error entries in shmem mapping. */
                if (non_swap_entry(entry))
                        continue;
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
                xas_pause(&xas);
                rcu_read_unlock();
 
-               page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+               folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
                                             vma, addr, &splug);
-               if (page)
-                       put_page(page);
+               if (folio)
+                       folio_put(folio);
 
                rcu_read_lock();
        }
index 6bf25342589fcb8f4e7373df9a563b4eeed792ef..82c68ccb5ab1a98b33aa3e6f10e3ed46ca9ed238 100644 (file)
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -46,10 +46,9 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index);
 
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-                                  struct vm_area_struct *vma,
-                                  unsigned long addr,
-                                  struct swap_iocb **plug);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+               struct vm_area_struct *vma, unsigned long addr,
+               struct swap_iocb **plug);
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
                struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
                bool skip_if_exists);
index d2fe70e307d94b312f616bbb5dd041456fa7ce51..97c8a950dd1814ced790cd0075f14a637f34ca30 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -533,9 +533,9 @@ fail_put_swap:
  * __read_swap_cache_async() call them and swap_read_folio() holds the
  * swap cache folio lock.
  */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-                                  struct vm_area_struct *vma,
-                                  unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+               struct vm_area_struct *vma, unsigned long addr,
+               struct swap_iocb **plug)
 {
        bool page_allocated;
        struct mempolicy *mpol;
@@ -549,7 +549,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
        if (page_allocated)
                swap_read_folio(folio, false, plug);
-       return folio_file_page(folio, swp_offset(entry));
+       return folio;
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,