mm/hugetlb: convert hugetlb_add_to_page_cache to take in a folio
Author:     Sidhartha Kumar <sidhartha.kumar@oracle.com>
AuthorDate: Wed, 25 Jan 2023 17:05:35 +0000 (09:05 -0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 13 Feb 2023 23:54:29 +0000 (15:54 -0800)
Every caller of hugetlb_add_to_page_cache() is now passing in
&folio->page; change the function to take in a folio directly and clean
up the call sites.

Link: https://lkml.kernel.org/r/20230125170537.96973-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/hugetlbfs/inode.c
include/linux/hugetlb.h
mm/hugetlb.c

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c736947e73da382c4f1954bd1f2bc5c2bb284a77..cfd09f95551b85345acebff13c4c5beb4ff37e32 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -871,7 +871,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                }
                clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
                __folio_mark_uptodate(folio);
-               error = hugetlb_add_to_page_cache(&folio->page, mapping, index);
+               error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
                        restore_reserve_on_error(h, &pseudo_vma, addr, folio);
                        folio_put(folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 20ceaaea1697abe151ccd5f2cf58c365dbf7c242..df6dd624ccfe8afc0837c4669bfed9d6e933c1d0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -723,7 +723,7 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask);
 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
                        pgoff_t idx);
 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address, struct folio *folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1f6270c586c007bbc86f8c865cf99daf733c5f7d..de1f73e5e200d95c3461e0b83d3fbcd8a2210714 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5662,10 +5662,9 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
        return present;
 }
 
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
                           pgoff_t idx)
 {
-       struct folio *folio = page_folio(page);
        struct inode *inode = mapping->host;
        struct hstate *h = hstate_inode(inode);
        int err;
@@ -5677,7 +5676,7 @@ int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
                __folio_clear_locked(folio);
                return err;
        }
-       ClearHPageRestoreReserve(page);
+       folio_clear_hugetlb_restore_reserve(folio);
 
        /*
         * mark folio dirty so that it will not be removed from cache/file
@@ -5836,7 +5835,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                new_folio = true;
 
                if (vma->vm_flags & VM_MAYSHARE) {
-                       int err = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+                       int err = hugetlb_add_to_page_cache(folio, mapping, idx);
                        if (err) {
                                /*
                                 * err can't be -EEXIST which implies someone
@@ -6269,7 +6268,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                 * hugetlb_fault_mutex_table that here must be hold by
                 * the caller.
                 */
-               ret = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+               ret = hugetlb_add_to_page_cache(folio, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
                folio_in_pagecache = true;