Merge tag 'libnvdimm-for-4.19_dax-memory-failure' of gitolite.kernel.org:pub/scm...
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 78427af91de96980a8450ef2c8dbeccbb59078d0..c3bc7e9c9a2acc550ea8aeb68720a8c5a611c610 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -541,14 +541,14 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
-               gfp_t gfp)
+static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
+                       struct page *page, gfp_t gfp)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       int ret = 0;
+       vm_fault_t ret = 0;
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -584,15 +584,15 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
                /* Deliver the page fault to userland */
                if (userfaultfd_missing(vma)) {
-                       int ret;
+                       vm_fault_t ret2;
 
                        spin_unlock(vmf->ptl);
                        mem_cgroup_cancel_charge(page, memcg, true);
                        put_page(page);
                        pte_free(vma->vm_mm, pgtable);
-                       ret = handle_userfault(vmf, VM_UFFD_MISSING);
-                       VM_BUG_ON(ret & VM_FAULT_FALLBACK);
-                       return ret;
+                       ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
+                       VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
+                       return ret2;
                }
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
@@ -663,7 +663,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
        return true;
 }
 
-int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        gfp_t gfp;
@@ -682,7 +682,7 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                pgtable_t pgtable;
                struct page *zero_page;
                bool set;
-               int ret;
+               vm_fault_t ret;
                pgtable = pte_alloc_one(vma->vm_mm, haddr);
                if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
@@ -752,7 +752,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
@@ -812,7 +812,7 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                        pud_t *pud, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
@@ -1118,15 +1118,16 @@ unlock:
        spin_unlock(vmf->ptl);
 }
 
-static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
-               struct page *page)
+static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
+                       pmd_t orig_pmd, struct page *page)
 {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
        pmd_t _pmd;
-       int ret = 0, i;
+       int i;
+       vm_fault_t ret = 0;
        struct page **pages;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
@@ -1236,7 +1237,7 @@ out_free_pages:
        goto out;
 }
 
-int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page = NULL, *new_page;
@@ -1245,7 +1246,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
        gfp_t huge_gfp;                 /* for allocation and charge */
-       int ret = 0;
+       vm_fault_t ret = 0;
 
        vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
@@ -1457,7 +1458,7 @@ out:
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
-int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct anon_vma *anon_vma = NULL;
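
What this diff does, hunk after hunk, is switch the huge-page fault paths from returning a plain int to returning vm_fault_t, the kernel's dedicated type for page-fault status codes (VM_FAULT_OOM, VM_FAULT_SIGBUS, VM_FAULT_FALLBACK, ...). The point of the separate typedef is that fault codes form a bitmask rather than an errno value, and once vm_fault_t is annotated __bitwise, sparse can flag any path that mixes the two domains. Below is a minimal sketch of a fault handler written against the converted signatures; my_fault and its use of alloc_page() are illustrative assumptions, not part of this patch:

	#include <linux/mm.h>
	#include <linux/mm_types.h>

	/*
	 * Hypothetical ->fault handler following the vm_fault_t
	 * convention this patch converts huge_memory.c to: return
	 * VM_FAULT_* codes, never a negative errno such as -ENOMEM.
	 */
	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return VM_FAULT_OOM;	/* fault code, not -ENOMEM */

		vmf->page = page;		/* hand the page to the core */
		return 0;			/* 0 means the fault was handled */
	}

Callers then test the result as a bitmask, as the converted VM_BUG_ON(ret2 & VM_FAULT_FALLBACK) above already does; checking ret & VM_FAULT_ERROR is the idiomatic way to catch any of the error codes at once.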