diff --git a/mm/mremap.c b/mm/mremap.c
index def01d86e36fd370f9764a9cdb746f3925fbbf8e..3320616ed93f4bb56ced4ad7bc9f6af9be4ebd31 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -191,6 +191,55 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                drop_rmap_locks(vma);
 }
 
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+                 unsigned long new_addr, unsigned long old_end,
+                 pmd_t *old_pmd, pmd_t *new_pmd)
+{
+       spinlock_t *old_ptl, *new_ptl;
+       struct mm_struct *mm = vma->vm_mm;
+       pmd_t pmd;
+
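+       /* Only PMD-aligned addresses spanning at least a full PMD qualify */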
+       if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK) ||
+           old_end - old_addr < PMD_SIZE)
+               return false;
+
+       /*
+        * The destination pmd shouldn't be established, free_pgtables()
+        * should have released it.
+        */
+       if (WARN_ON(!pmd_none(*new_pmd)))
+               return false;
+
+       /*
+        * We don't have to worry about the ordering of src and dst
+        * ptlocks because exclusive mmap_sem prevents deadlock.
+        */
+       old_ptl = pmd_lock(mm, old_pmd);
+       new_ptl = pmd_lockptr(mm, new_pmd);
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+       /* Clear the pmd */
+       pmd = *old_pmd;
+       pmd_clear(old_pmd);
+
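+       /* Re-check under the ptlocks: the destination must still be empty */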
+       VM_BUG_ON(!pmd_none(*new_pmd));
+
+       /* Set the new pmd */
+       set_pmd_at(mm, new_addr, new_pmd, pmd);
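+       /* Flush stale TLB entries for the old range before unlocking */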
+       flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+       if (new_ptl != old_ptl)
+               spin_unlock(new_ptl);
+       spin_unlock(old_ptl);
+
+       return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
@@ -235,8 +284,27 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
+               } else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+                       /*
+                        * If the extent is PMD-sized, try to speed up the
+                        * move by remapping the whole PMD entry at once,
+                        * when possible, rather than each PTE in turn.
+                        */
+                       bool moved;
+
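+                       /* Take the rmap locks, as move_ptes() would */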
+                       if (need_rmap_locks)
+                               take_rmap_locks(vma);
+                       moved = move_normal_pmd(vma, old_addr, new_addr,
+                                       old_end, old_pmd, new_pmd);
+                       if (need_rmap_locks)
+                               drop_rmap_locks(vma);
+                       if (moved)
+                               continue;
+#endif
                }
-               if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+
+               if (pte_alloc(new_vma->vm_mm, new_pmd))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
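
Not part of the patch: a minimal userspace sketch of the case the new fast
path targets. It forces mremap() to relocate a populated anonymous mapping,
which drives move_page_tables() over PMD_SIZE extents. The 2 MiB PMD size and
the expectation that a large anonymous mapping comes back PMD-aligned are
x86-64 assumptions made for illustration; neither is guaranteed by the mmap()
API, and misaligned extents simply fall back to the PTE path.

/*
 * Hypothetical test program, not part of this patch. Forces a move of a
 * populated anonymous mapping so the kernel walks move_page_tables().
 * Assumes x86-64 with 4K pages, where PMD_SIZE is 2 MiB.
 */
#define _GNU_SOURCE		/* for mremap() and MREMAP_* */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define REGION_SIZE	(64UL << 20)	/* 64 MiB: many PMD-sized extents */

int main(void)
{
	void *old, *target, *new;

	old = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(old, 0xaa, REGION_SIZE);	/* populate the page tables */

	/* Reserve a destination, then move onto it with MREMAP_FIXED */
	target = mmap(NULL, REGION_SIZE, PROT_NONE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (target == MAP_FAILED) {
		perror("mmap target");
		return 1;
	}

	/*
	 * MREMAP_FIXED unmaps anything at target and guarantees a move.
	 * When both source and destination extents are PMD-aligned, each
	 * 2 MiB chunk is moved as a single PMD entry instead of 512 PTEs.
	 */
	new = mremap(old, REGION_SIZE, REGION_SIZE,
		     MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %p -> %p\n", old, new);
	return 0;
}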