hugetlbfs: New huge_add_to_page_cache helper routine
author Mike Kravetz <mike.kravetz@oracle.com>
Tue, 8 Sep 2015 22:01:50 +0000 (15:01 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Sep 2015 22:35:28 +0000 (15:35 -0700)
Currently, there is only a single place where hugetlbfs pages are added
to the page cache.  The new fallocate code will be adding a second one, so
break the functionality out into its own helper.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 35afca1692fb9d1b59b0528df27bf4aad2a0ce07..1222fb07a74654beae1591cec2e07c27c7376ccc 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -333,6 +333,8 @@ struct huge_bootmem_page {
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+                       pgoff_t idx);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 114ad6ce7030add5e95cf3e8829a96965ba431d3..d45eacc5653ed7e84ad650d45682b3b9fff0ea45 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3375,6 +3375,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
        return page != NULL;
 }
 
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+                          pgoff_t idx)
+{
+       struct inode *inode = mapping->host;
+       struct hstate *h = hstate_inode(inode);
+       int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+
+       if (err)
+               return err;
+       ClearPagePrivate(page);
+
+       spin_lock(&inode->i_lock);
+       inode->i_blocks += blocks_per_huge_page(h);
+       spin_unlock(&inode->i_lock);
+       return 0;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                           struct address_space *mapping, pgoff_t idx,
                           unsigned long address, pte_t *ptep, unsigned int flags)
@@ -3422,21 +3439,13 @@ retry:
                set_page_huge_active(page);
 
                if (vma->vm_flags & VM_MAYSHARE) {
-                       int err;
-                       struct inode *inode = mapping->host;
-
-                       err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+                       int err = huge_add_to_page_cache(page, mapping, idx);
                        if (err) {
                                put_page(page);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
-                       ClearPagePrivate(page);
-
-                       spin_lock(&inode->i_lock);
-                       inode->i_blocks += blocks_per_huge_page(h);
-                       spin_unlock(&inode->i_lock);
                } else {
                        lock_page(page);
                        if (unlikely(anon_vma_prepare(vma))) {
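
For context, a minimal sketch of how a second caller (such as the fallocate
support this commit prepares for) might use the new helper.  The function
name, the availability of alloc_huge_page() outside mm/hugetlb.c, and the
surrounding error handling are illustrative assumptions, not part of this
commit:

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>

/* Hypothetical example; assumes alloc_huge_page() is visible to the caller. */
static long example_alloc_and_insert(struct vm_area_struct *vma,
				     struct address_space *mapping,
				     pgoff_t idx, unsigned long addr)
{
	struct hstate *h = hstate_inode(mapping->host);
	struct page *page;
	int err;

	/* Allocate a huge page, consuming a reservation if one exists. */
	page = alloc_huge_page(vma, addr, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);

	clear_huge_page(page, addr, pages_per_huge_page(h));
	__SetPageUptodate(page);

	/*
	 * On success the helper clears PagePrivate (the page cache
	 * reference now owns the page) and charges
	 * blocks_per_huge_page(h) to the inode.  The page is still
	 * locked by add_to_page_cache().
	 */
	err = huge_add_to_page_cache(page, mapping, idx);
	if (err) {
		put_page(page);
		return err;
	}

	/* Drop the allocation reference; the page cache holds its own. */
	unlock_page(page);
	put_page(page);
	return 0;
}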