Merge tag 'iommu-updates-v4.19' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / mm / hugetlb.c
index 47566bb0b4b12fb126d01e4dc464737829363454..3c21775f196b2f38a25ed05687791a4c79a3d3d4 100644 (file)
@@ -1479,22 +1479,20 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
- * number of free hugepages would be reduced below the number of reserved
- * hugepages.
+ * dissolution fails because a given page is not a free hugepage, or because
+ * free hugepages are fully reserved.
  */
 int dissolve_free_huge_page(struct page *page)
 {
-       int rc = 0;
+       int rc = -EBUSY;
 
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
                struct page *head = compound_head(page);
                struct hstate *h = page_hstate(head);
                int nid = page_to_nid(head);
-               if (h->free_huge_pages - h->resv_huge_pages == 0) {
-                       rc = -EBUSY;
+               if (h->free_huge_pages - h->resv_huge_pages == 0)
                        goto out;
-               }
                /*
                 * Move PageHWPoison flag from head page to the raw error page,
                 * which makes any subpages rather than the error page reusable.
@@ -1508,6 +1506,7 @@ int dissolve_free_huge_page(struct page *page)
                h->free_huge_pages_node[nid]--;
                h->max_huge_pages--;
                update_and_free_page(h, head);
+               rc = 0;
        }
 out:
        spin_unlock(&hugetlb_lock);
@@ -3502,14 +3501,15 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * cannot race with other handlers or page migration.
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
-static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long address, pte_t *ptep,
                       struct page *pagecache_page, spinlock_t *ptl)
 {
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
        struct page *old_page, *new_page;
-       int ret = 0, outside_reserve = 0;
+       int outside_reserve = 0;
+       vm_fault_t ret = 0;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
        unsigned long haddr = address & huge_page_mask(h);
@@ -3573,8 +3573,7 @@ retry_avoidcopy:
                        return 0;
                }
 
-               ret = (PTR_ERR(new_page) == -ENOMEM) ?
-                       VM_FAULT_OOM : VM_FAULT_SIGBUS;
+               ret = vmf_error(PTR_ERR(new_page));
                goto out_release_old;
        }
 
@@ -3677,12 +3676,13 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
        return 0;
 }
 
-static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                          struct address_space *mapping, pgoff_t idx,
-                          unsigned long address, pte_t *ptep, unsigned int flags)
+static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
+                       struct vm_area_struct *vma,
+                       struct address_space *mapping, pgoff_t idx,
+                       unsigned long address, pte_t *ptep, unsigned int flags)
 {
        struct hstate *h = hstate_vma(vma);
-       int ret = VM_FAULT_SIGBUS;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
        int anon_rmap = 0;
        unsigned long size;
        struct page *page;
@@ -3745,11 +3745,7 @@ retry:
 
                page = alloc_huge_page(vma, haddr, 0);
                if (IS_ERR(page)) {
-                       ret = PTR_ERR(page);
-                       if (ret == -ENOMEM)
-                               ret = VM_FAULT_OOM;
-                       else
-                               ret = VM_FAULT_SIGBUS;
+                       ret = vmf_error(PTR_ERR(page));
                        goto out;
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
@@ -3873,12 +3869,12 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 }
 #endif
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags)
 {
        pte_t *ptep, entry;
        spinlock_t *ptl;
-       int ret;
+       vm_fault_t ret;
        u32 hash;
        pgoff_t idx;
        struct page *page = NULL;
@@ -4208,7 +4204,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (absent || is_swap_pte(huge_ptep_get(pte)) ||
                    ((flags & FOLL_WRITE) &&
                      !huge_pte_write(huge_ptep_get(pte)))) {
-                       int ret;
+                       vm_fault_t ret;
                        unsigned int fault_flags = 0;
 
                        if (pte)