Merge branch 'mm-rst' into docs-next
[linux-block.git] mm/hugetlb.c
index 5af974abae460a2e22357311cf2f81a6c7209ae7..129088710510048ae0f48f41bebb79857bdce7e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
 #include <linux/string_helpers.h>
@@ -636,29 +637,22 @@ EXPORT_SYMBOL_GPL(linear_hugepage_index);
  */
 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 {
-       struct hstate *hstate;
-
-       if (!is_vm_hugetlb_page(vma))
-               return PAGE_SIZE;
-
-       hstate = hstate_vma(vma);
-
-       return 1UL << huge_page_shift(hstate);
+       if (vma->vm_ops && vma->vm_ops->pagesize)
+               return vma->vm_ops->pagesize(vma);
+       return PAGE_SIZE;
 }
 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
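
With this change vma_kernel_pagesize() no longer special-cases hugetlb: it dispatches through the new ->pagesize hook and falls back to PAGE_SIZE. As a minimal sketch of how a caller might use it (vma_page_align() is a hypothetical helper, not part of this patch):

/* Hypothetical helper: align an address down to the natural page
 * boundary of whatever backs the VMA. */
static unsigned long vma_page_align(struct vm_area_struct *vma,
				    unsigned long addr)
{
	unsigned long psize = vma_kernel_pagesize(vma);

	/* vma_kernel_pagesize() always returns a power of two */
	return addr & ~(psize - 1);
}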
 
 /*
  * Return the page size being used by the MMU to back a VMA. In the majority
  * of cases, the page size used by the kernel matches the MMU size. On
- * architectures where it differs, an architecture-specific version of this
- * function is required.
+ * architectures where it differs, an architecture-specific 'strong'
+ * version of this symbol is required.
  */
-#ifndef vma_mmu_pagesize
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
        return vma_kernel_pagesize(vma);
 }
-#endif
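
Making the generic definition __weak means an architecture whose MMU page size for hugetlb mappings differs from the kernel's view overrides it with an ordinary strong definition of the same signature, instead of the old #ifndef/#define arrangement. A hypothetical arch-side sketch, where arch_hugetlb_mmu_shift() is a made-up stand-in for whatever lookup the architecture really performs:

/* arch/<arch>/mm/hugetlbpage.c -- hypothetical strong override */
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	/* arch_hugetlb_mmu_shift() stands in for the real arch lookup */
	return 1UL << arch_hugetlb_mmu_shift(vma);
}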
 
 /*
  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
@@ -1583,7 +1577,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
                page = NULL;
        } else {
                h->surplus_huge_pages++;
-               h->nr_huge_pages_node[page_to_nid(page)]++;
+               h->surplus_huge_pages_node[page_to_nid(page)]++;
        }
 
 out_unlock:
@@ -3152,6 +3146,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
        return 0;
 }
 
+static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+{
+       struct hstate *hstate = hstate_vma(vma);
+
+       return 1UL << huge_page_shift(hstate);
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3169,6 +3170,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
        .split = hugetlb_vm_op_split,
+       .pagesize = hugetlb_vm_op_pagesize,
 };
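
With .pagesize wired up here, a hugetlb VMA resolves vma_kernel_pagesize() to hugetlb_vm_op_pagesize() and reports the hstate's huge page size, while VMAs without the hook keep returning PAGE_SIZE. Any other provider that maps at a larger granularity could report it the same way; a hypothetical sketch (my_dev_vm_ops and the SZ_2M granularity are illustrative, not from this patch):

#include <linux/sizes.h>

/* Hypothetical driver reporting a non-PAGE_SIZE mapping granularity */
static unsigned long my_dev_vm_op_pagesize(struct vm_area_struct *vma)
{
	return SZ_2M;	/* whatever granularity the device really maps with */
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.pagesize = my_dev_vm_op_pagesize,
	/* .fault, .open, .close, ... as the driver requires */
};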
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4374,6 +4376,12 @@ int hugetlb_reserve_pages(struct inode *inode,
        struct resv_map *resv_map;
        long gbl_reserve;
 
+       /* This should never happen */
+       if (from > to) {
+               VM_WARN(1, "%s called with a negative range\n", __func__);
+               return -EINVAL;
+       }
+
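
The warning relies on the <linux/mmdebug.h> include added at the top of this diff: VM_WARN() only prints when CONFIG_DEBUG_VM is enabled and compiles away otherwise, while the from > to test and the -EINVAL return are taken on every build. Roughly (a simplified paraphrase, not the exact header text):

#ifdef CONFIG_DEBUG_VM
#define VM_WARN(cond, format...)	(void)WARN(cond, format)
#else
#define VM_WARN(cond, format...)	BUILD_BUG_ON_INVALID(cond)
#endif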
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page