diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0f580ea7f41d9137ecc379468f4e1029703cd802..d26162e81feaa78b2fb4615839cbb580fc626f4b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -627,6 +627,7 @@ pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 {
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
 }
+EXPORT_SYMBOL_GPL(linear_hugepage_index);
 
 /*
  * Return the size of the pages allocated when backing a VMA. In the majority
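
With linear_hugepage_index() now exported for GPL modules, code outside
hugetlb.c can translate a user address in a hugetlb VMA into the index of
the backing huge page in the page cache. A minimal sketch, assuming a GPL
module context (the helper name backing_index is illustrative, not part of
this patch):

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	/* Illustrative helper: map an address in a hugetlb VMA to the
	 * page-cache index of the backing huge page. The index is in
	 * units of the VMA's huge page size, as computed by
	 * vma_hugecache_offset() above. */
	static pgoff_t backing_index(struct vm_area_struct *vma,
				     unsigned long address)
	{
		return linear_hugepage_index(vma, address);
	}
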
@@ -1031,8 +1032,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 }
 }
 
-static bool pfn_range_valid_gigantic(unsigned long start_pfn,
-                               unsigned long nr_pages)
+static bool pfn_range_valid_gigantic(struct zone *z,
+                       unsigned long start_pfn, unsigned long nr_pages)
 {
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;
@@ -1043,6 +1044,9 @@ static bool pfn_range_valid_gigantic(unsigned long start_pfn,
 
                page = pfn_to_page(i);
 
+               if (page_zone(page) != z)
+                       return false;
+
                if (PageReserved(page))
                        return false;
 
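
The new page_zone() check rejects a candidate range as soon as it crosses
into a different zone, before any of the expensive isolation work in
alloc_contig_range(), which operates on a single zone at a time. The next
hunk threads the zone being scanned into the validator. An abridged sketch
of how the caller fits together after this patch, reconstructed from the
surrounding hunks (locking details and the retry path are simplified):

	/* Scan one zone for a naturally aligned, valid run of nr_pages
	 * PFNs; give up on a candidate as soon as it leaves the zone. */
	pfn = ALIGN(z->zone_start_pfn, nr_pages);
	while (zone_spans_last_pfn(z, pfn, nr_pages)) {
		if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
			/* Drop the zone lock: alloc_contig_range() will
			 * take it itself. */
			spin_unlock_irqrestore(&z->lock, flags);
			if (!__alloc_gigantic_page(pfn, nr_pages))
				return pfn_to_page(pfn);
			spin_lock_irqsave(&z->lock, flags);
		}
		pfn += nr_pages;	/* keep natural alignment */
	}
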
@@ -1075,7 +1079,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned int order)
 
                pfn = ALIGN(z->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
-                       if (pfn_range_valid_gigantic(pfn, nr_pages)) {
+                       if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone