[linux-2.6-block.git] / mm / mremap.c
index 7f9f9180e4013084a02fd27390321240af972e3c..3320616ed93f4bb56ced4ad7bc9f6af9be4ebd31 100644 (file)
@@ -191,22 +191,66 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                drop_rmap_locks(vma);
 }
 
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+                 unsigned long new_addr, unsigned long old_end,
+                 pmd_t *old_pmd, pmd_t *new_pmd)
+{
+       spinlock_t *old_ptl, *new_ptl;
+       struct mm_struct *mm = vma->vm_mm;
+       pmd_t pmd;
+
+       if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+           || old_end - old_addr < PMD_SIZE)
+               return false;
+
+       /*
+        * The destination pmd shouldn't be established; free_pgtables()
+        * should have released it.
+        */
+       if (WARN_ON(!pmd_none(*new_pmd)))
+               return false;
+
+       /*
+        * We don't have to worry about the ordering of src and dst
+        * ptlocks because exclusive mmap_sem prevents deadlock.
+        */
+       old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+       new_ptl = pmd_lockptr(mm, new_pmd);
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+       /* Clear the pmd */
+       pmd = *old_pmd;
+       pmd_clear(old_pmd);
+
+       VM_BUG_ON(!pmd_none(*new_pmd));
+
+       /* Set the new pmd */
+       set_pmd_at(mm, new_addr, new_pmd, pmd);
+       flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+       if (new_ptl != old_ptl)
+               spin_unlock(new_ptl);
+       spin_unlock(old_ptl);
+
+       return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
 {
        unsigned long extent, next, old_end;
+       struct mmu_notifier_range range;
        pmd_t *old_pmd, *new_pmd;
-       unsigned long mmun_start;       /* For mmu_notifiers */
-       unsigned long mmun_end;         /* For mmu_notifiers */
 
        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);
 
-       mmun_start = old_addr;
-       mmun_end   = old_end;
-       mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+       mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end);
+       mmu_notifier_invalidate_range_start(&range);
 
        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
@@ -237,8 +281,26 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
+               } else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+                       /*
+                        * If the extent is PMD-sized, try to speed up the move
+                        * by moving the whole PMD entry at once if possible.
+                        */
+                       bool moved;
+
+                       if (need_rmap_locks)
+                               take_rmap_locks(vma);
+                       moved = move_normal_pmd(vma, old_addr, new_addr,
+                                       old_end, old_pmd, new_pmd);
+                       if (need_rmap_locks)
+                               drop_rmap_locks(vma);
+                       if (moved)
+                               continue;
+#endif
                }
-               if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+
+               if (pte_alloc(new_vma->vm_mm, new_pmd))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
@@ -247,7 +309,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                          new_pmd, new_addr, need_rmap_locks);
        }
 
-       mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+       mmu_notifier_invalidate_range_end(&range);
 
        return len + old_addr - old_end;        /* how much done */
 }
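
For context, here is a minimal userspace sketch of the kind of operation this fast path accelerates on a kernel built with CONFIG_HAVE_MOVE_PMD: relocating a large, populated anonymous mapping with mremap(MREMAP_MAYMOVE | MREMAP_FIXED). The 2 MiB PMD size, the alloc_pmd_aligned() helper and the region size are illustrative assumptions, not part of the patch; move_normal_pmd() only runs when both the source and destination addresses are PMD-aligned, otherwise the existing PTE-level copy is used.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PMD_SZ		(2UL << 20)	/* assumed PMD size: 2 MiB with 4K pages */
#define REGION_SIZE	(1UL << 30)	/* 1 GiB, i.e. 512 whole PMDs */

/* Carve a PMD-aligned region out of a slightly larger anonymous reservation. */
static void *alloc_pmd_aligned(size_t size, int prot)
{
	char *area = mmap(NULL, size + PMD_SZ, prot,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return MAP_FAILED;
	return (void *)(((unsigned long)area + PMD_SZ - 1) & ~(PMD_SZ - 1));
}

int main(void)
{
	void *src = alloc_pmd_aligned(REGION_SIZE, PROT_READ | PROT_WRITE);
	void *dst = alloc_pmd_aligned(REGION_SIZE, PROT_NONE);

	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;

	memset(src, 0xab, REGION_SIZE);	/* populate the source page tables */

	/*
	 * Both addresses are PMD-aligned and the length is a multiple of
	 * PMD_SIZE, so each iteration of move_page_tables() can take the
	 * move_normal_pmd() fast path and move one PMD entry instead of
	 * copying up to 512 PTEs.
	 */
	void *moved = mremap(src, REGION_SIZE, REGION_SIZE,
			     MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (moved == MAP_FAILED)
		return 1;

	printf("moved %lu MiB from %p to %p\n", REGION_SIZE >> 20, src, moved);
	return 0;
}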