diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df2e7dd5ff17fedb3dd2fdeb1ef9c76c72052eeb..8dfdffc34a99bcb099742fd9614196fd3e1f685f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@ retry_avoidcopy:
        copy_user_huge_page(new_page, old_page, address, vma,
                            pages_per_huge_page(h));
        __SetPageUptodate(new_page);
-       set_page_huge_active(new_page);
 
        mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
        mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
+               set_page_huge_active(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
+       bool new_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ retry:
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
-               set_page_huge_active(page);
+               new_page = true;
 
                if (vma->vm_flags & VM_MAYSHARE) {
                        int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ retry:
        }
 
        spin_unlock(ptl);
+
+       /*
+        * Only make newly allocated pages active.  Existing pages found
+        * in the pagecache could be !page_huge_active() if they have been
+        * isolated for migration.
+        */
+       if (new_page)
+               set_page_huge_active(page);
+
        unlock_page(page);
 out:
        return ret;
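
The comment added in the hunk above states the ordering rule behind the whole change: a hugetlb page may only be marked active, and thereby made visible to isolate_huge_page(), once it is fully linked into the page cache or page tables. As an aside, here is a minimal user-space sketch of that publish-last pattern under a lock; it is an analogy only, and every identifier in it (toy_page, toy_fault, toy_isolate) is invented for illustration rather than taken from the kernel.

/*
 * Toy user-space model (not kernel code) of the ordering enforced by
 * this patch: the fault path finishes linking the page before it sets
 * the active flag, so the migration path can never isolate a
 * half-initialized page.  All identifiers are made up for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	pthread_mutex_t lock;
	bool active;	/* analogue of page_huge_active()            */
	bool mapped;	/* analogue of "installed in the page table" */
};

/* Migration side: only pages marked active may be isolated. */
static bool toy_isolate(struct toy_page *p)
{
	bool isolated = false;

	pthread_mutex_lock(&p->lock);
	if (p->active) {
		if (!p->mapped)
			fprintf(stderr, "isolated a half-initialized page\n");
		p->active = false;	/* taken off the active list */
		isolated = true;
	}
	pthread_mutex_unlock(&p->lock);
	return isolated;
}

/* Fault side, post-fix ordering: publish "active" only when fully set up. */
static void toy_fault(struct toy_page *p)
{
	pthread_mutex_lock(&p->lock);
	p->mapped = true;	/* stands in for installing the mapping */
	p->active = true;	/* stands in for set_page_huge_active(),
				 * now done after the page is linked in */
	pthread_mutex_unlock(&p->lock);
}

static void *migration_thread(void *arg)
{
	toy_isolate(arg);
	return NULL;
}

int main(void)
{
	static struct toy_page page = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, migration_thread, &page);
	toy_fault(&page);
	pthread_join(&t, NULL);
	return 0;
}

The diff applies the same idea in three places: the CoW path, hugetlb_no_page() (only for newly allocated pages, tracked by the new new_page flag), and hugetlb_mcopy_atomic_pte(), each calling set_page_huge_active() only after the new page is wired up.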
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);
-       set_page_huge_active(page);
 
        mapping = dst_vma->vm_file->f_mapping;
        idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
+       set_page_huge_active(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -4268,7 +4278,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                break;
                        }
                        if (ret & VM_FAULT_RETRY) {
-                               if (nonblocking)
+                               if (nonblocking &&
+                                   !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                                        *nonblocking = 0;
                                *nr_pages = 0;
                                /*