mm/hugetlb: handle races in alloc_huge_page() and hugetlb_reserve_pages()
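
Both alloc_huge_page() and hugetlb_reserve_pages() first ask how many pages still need reserving (vma_needs_reservation()/region_chg()), charge the subpool and the global reserve count for that number, and only later commit the entries to the reserve map (vma_commit_reservation()/region_add()). The two paths can race: if the other path commits a page from the same range in the window between the two steps, fewer entries are added at commit time than were charged for, leaving the subpool and reserve counts too high. The patch detects this by comparing the count returned at commit time with the count charged earlier and gives back the difference; a toy model of each interleaving follows the corresponding hunk below.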
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd3fc41947330b6f351845a58b726febb7ab5f0c..75c0eef52c5df4f3b534e439a56495f8ac8534f3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1542,7 +1542,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
-       long chg;
+       long chg, commit;
        int ret, idx;
        struct hugetlb_cgroup *h_cg;
 
@@ -1583,7 +1583,22 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        set_page_private(page, (unsigned long)spool);
 
-       vma_commit_reservation(h, vma, addr);
+       commit = vma_commit_reservation(h, vma, addr);
+       if (unlikely(chg > commit)) {
+               /*
+                * The page was added to the reservation map between
+                * vma_needs_reservation and vma_commit_reservation.
+                * This indicates a race with hugetlb_reserve_pages.
+                * Adjust for the subpool count incremented above AND
+                * in hugetlb_reserve_pages for the same page.  Also,
+                * the reservation count added in hugetlb_reserve_pages
+                * no longer applies.
+                */
+               long rsv_adjust;
+
+               rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+               hugetlb_acct_memory(h, -rsv_adjust);
+       }
        return page;
 
 out_uncharge_cgroup:
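
For illustration, a minimal user-space model of the adjustment this hunk adds. Everything here is invented for the sketch (chg_model(), add_model(), subpool_count, reserve_count, NPAGES); it only mimics the "charge at region_chg() time, settle up at region_add() time" pattern, not the real kernel data structures or locking. The interleaving is the one the new comment describes: the page lands in the reservation map between vma_needs_reservation() and vma_commit_reservation().

/* sketch1.c - toy model of the chg-vs-commit adjustment; not kernel code.
 * chg_model()/add_model() stand in for region_chg()/region_add(); the
 * two counters stand in for the subpool and global reserve counts.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

static bool reserved[NPAGES];
static long subpool_count, reserve_count;

/* region_chg() analogue: count pages in [from, to) not yet in the map,
 * without changing the map. */
static long chg_model(int from, int to)
{
	long n = 0;
	for (int i = from; i < to; i++)
		n += !reserved[i];
	return n;
}

/* region_add() analogue: insert [from, to) into the map and return how
 * many entries this call actually added. */
static long add_model(int from, int to)
{
	long n = 0;
	for (int i = from; i < to; i++)
		if (!reserved[i]) {
			reserved[i] = true;
			n++;
		}
	return n;
}

int main(void)
{
	/* alloc_huge_page() faults on page 2: vma_needs_reservation()
	 * says one page is needed, so it is charged for up front. */
	long chg = chg_model(2, 3);		/* 1 */
	subpool_count += chg;
	reserve_count += chg;

	/* A racing hugetlb_reserve_pages() charges for and commits the
	 * whole range [0, 4), including page 2, in the window. */
	long r_chg = chg_model(0, 4);		/* 4 */
	subpool_count += r_chg;
	reserve_count += r_chg;
	add_model(0, 4);

	/* vma_commit_reservation() analogue: page 2 is already in the
	 * map, so the commit adds nothing. */
	long commit = add_model(2, 3);		/* 0 */
	if (chg > commit) {
		/* Both paths charged for page 2; give one page back, as
		 * the hunk does via hugepage_subpool_put_pages(). */
		long rsv_adjust = chg - commit;
		subpool_count -= rsv_adjust;
		reserve_count -= rsv_adjust;
	}

	/* Four pages are reserved and the counts agree again. */
	assert(subpool_count == 4 && reserve_count == 4);
	printf("subpool=%ld reserve=%ld\n", subpool_count, reserve_count);
	return 0;
}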
@@ -3701,8 +3716,24 @@ int hugetlb_reserve_pages(struct inode *inode,
         * consumed reservations are stored in the map. Hence, nothing
         * else has to be done for private mappings here
         */
-       if (!vma || vma->vm_flags & VM_MAYSHARE)
-               region_add(resv_map, from, to);
+       if (!vma || vma->vm_flags & VM_MAYSHARE) {
+               long add = region_add(resv_map, from, to);
+
+               if (unlikely(chg > add)) {
+                       /*
+                        * pages in this range were added to the reserve
+                        * map between region_chg and region_add.  This
+                        * indicates a race with alloc_huge_page.  Adjust
+                        * the subpool and reserve counts modified above
+                        * based on the difference.
+                        */
+                       long rsv_adjust;
+
+                       rsv_adjust = hugepage_subpool_put_pages(spool,
+                                                               chg - add);
+                       hugetlb_acct_memory(h, -rsv_adjust);
+               }
+       }
        return 0;
 out_err:
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
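
And the mirrored interleaving, which the hunk above handles: alloc_huge_page() commits a page from the range after region_chg() has run but before region_add(), so region_add() returns fewer entries than region_chg() predicted. Same toy model as the previous sketch, same invented names; only the side that detects the shortfall changes.

/* sketch2.c - the same toy model, interleaved the other way around;
 * not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

static bool reserved[NPAGES];
static long subpool_count, reserve_count;

static long chg_model(int from, int to)	/* region_chg() analogue */
{
	long n = 0;
	for (int i = from; i < to; i++)
		n += !reserved[i];
	return n;
}

static long add_model(int from, int to)	/* region_add() analogue */
{
	long n = 0;
	for (int i = from; i < to; i++)
		if (!reserved[i]) {
			reserved[i] = true;
			n++;
		}
	return n;
}

int main(void)
{
	/* hugetlb_reserve_pages() plans to reserve [0, 4) and charges
	 * the subpool and global reserve for all four pages. */
	long chg = chg_model(0, 4);		/* 4 */
	subpool_count += chg;
	reserve_count += chg;

	/* A racing fault on page 2 charges for it and commits it. */
	long f_chg = chg_model(2, 3);		/* 1 */
	subpool_count += f_chg;
	reserve_count += f_chg;
	add_model(2, 3);

	/* region_add() now finds page 2 already present. */
	long add = add_model(0, 4);		/* 3 */
	if (chg > add) {
		/* The double charge is chg - add; return it, as the
		 * hunk does for the subpool and global reserve. */
		long rsv_adjust = chg - add;
		subpool_count -= rsv_adjust;
		reserve_count -= rsv_adjust;
	}

	assert(subpool_count == 4 && reserve_count == 4);
	printf("subpool=%ld reserve=%ld\n", subpool_count, reserve_count);
	return 0;
}

In both interleavings the adjustment restores the same invariant: the subpool and global reserve charges match the number of entries actually present in the reserve map.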