Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
[linux-2.6-block.git] / mm / hugetlb.c
index 19d0d08b396fb1356bc4e834d2aaec2977ee173e..d26162e81feaa78b2fb4615839cbb580fc626f4b 100644 (file)
@@ -51,6 +51,7 @@ __initdata LIST_HEAD(huge_boot_pages);
 static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
+static bool __initdata parsed_valid_hugepagesz = true;
 
 /*
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
@@ -144,7 +145,8 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                }
        }
 
-       if (spool->min_hpages != -1) {          /* minimum size accounting */
+       /* minimum size accounting */
+       if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
@@ -182,7 +184,8 @@ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;
 
-       if (spool->min_hpages != -1) {          /* minimum size accounting */
+       /* minimum size accounting */
+       if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
@@ -624,6 +627,7 @@ pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 {
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
 }
+EXPORT_SYMBOL_GPL(linear_hugepage_index);
 
 /*
  * Return the size of the pages allocated when backing a VMA. In the majority
@@ -937,9 +941,7 @@ err:
  */
 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 {
-       nid = next_node(nid, *nodes_allowed);
-       if (nid == MAX_NUMNODES)
-               nid = first_node(*nodes_allowed);
+       nid = next_node_in(nid, *nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);
 
        return nid;
@@ -1030,8 +1032,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 }
 
-static bool pfn_range_valid_gigantic(unsigned long start_pfn,
-                               unsigned long nr_pages)
+static bool pfn_range_valid_gigantic(struct zone *z,
+                       unsigned long start_pfn, unsigned long nr_pages)
 {
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;
@@ -1042,6 +1044,9 @@ static bool pfn_range_valid_gigantic(unsigned long start_pfn,
 
                page = pfn_to_page(i);
 
+               if (page_zone(page) != z)
+                       return false;
+
                if (PageReserved(page))
                        return false;
 
@@ -1074,7 +1079,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned int order)
 
                pfn = ALIGN(z->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
-                       if (pfn_range_valid_gigantic(pfn, nr_pages)) {
+                       if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
@@ -2659,6 +2664,11 @@ static int __init hugetlb_init(void)
 subsys_initcall(hugetlb_init);
 
 /* Should be called on processing a hugepagesz=... option */
+void __init hugetlb_bad_size(void)
+{
+       parsed_valid_hugepagesz = false;
+}
+
 void __init hugetlb_add_hstate(unsigned int order)
 {
        struct hstate *h;
@@ -2678,8 +2688,8 @@ void __init hugetlb_add_hstate(unsigned int order)
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
-       h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
-       h->next_nid_to_free = first_node(node_states[N_MEMORY]);
+       h->next_nid_to_alloc = first_memory_node;
+       h->next_nid_to_free = first_memory_node;
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/1024);
 
@@ -2691,11 +2701,17 @@ static int __init hugetlb_nrpages_setup(char *s)
        unsigned long *mhp;
        static unsigned long *last_mhp;
 
+       if (!parsed_valid_hugepagesz) {
+               pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
+                       s);
+               parsed_valid_hugepagesz = true;
+               return 1;
+       }
        /*
         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
         * so this hugepages= parameter goes to the "default hstate".
         */
-       if (!hugetlb_max_hstate)
+       else if (!hugetlb_max_hstate)
                mhp = &default_hstate_max_huge_pages;
        else
                mhp = &parsed_hstate->max_huge_pages;