diff --git a/mm/truncate.c b/mm/truncate.c
index 1d2fb2dca96fcda760471166b22a09eb4b921418..45d68e90b7037669fce5b50849c4d5964451c2d4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
 static inline void __clear_shadow_entry(struct address_space *mapping,
                                pgoff_t index, void *entry)
 {
-       struct radix_tree_node *node;
-       void **slot;
+       XA_STATE(xas, &mapping->i_pages, index);
 
-       if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
+       xas_set_update(&xas, workingset_update_node);
+       if (xas_load(&xas) != entry)
                return;
-       if (*slot != entry)
-               return;
-       __radix_tree_replace(&mapping->i_pages, node, slot, NULL,
-                            workingset_update_node);
+       xas_store(&xas, NULL);
        mapping->nrexceptional--;
 }
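
The converted __clear_shadow_entry() walks the slot through an on-stack XA_STATE cursor, hooks workingset_update_node() into node accounting via xas_set_update(), and stores NULL only if the slot still holds the expected shadow entry. As a minimal sketch of the same compare-and-clear idea, here is the plain (non-advanced) XArray API used on a private array; the names below are illustrative and not part of the patch:

#include <linux/xarray.h>

/* Illustrative only: a private array, not mapping->i_pages. */
static DEFINE_XARRAY(demo_array);

/*
 * Clear @index only if it still holds @expected, mirroring the
 * "load, compare, store NULL" pattern of __clear_shadow_entry().
 */
static void demo_clear_if_match(unsigned long index, void *expected)
{
	xa_cmpxchg(&demo_array, index, expected, NULL, GFP_KERNEL);
}

The real helper uses the advanced xas_* API instead because the caller already holds the i_pages lock and the store must run the workingset node-update callback.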
 
@@ -70,7 +67,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                return;
 
        for (j = 0; j < pagevec_count(pvec); j++)
-               if (radix_tree_exceptional_entry(pvec->pages[j]))
+               if (xa_is_value(pvec->pages[j]))
                        break;
 
        if (j == pagevec_count(pvec))
@@ -85,7 +82,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                struct page *page = pvec->pages[i];
                pgoff_t index = indices[i];
 
-               if (!radix_tree_exceptional_entry(page)) {
+               if (!xa_is_value(page)) {
                        pvec->pages[j++] = page;
                        continue;
                }
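
In this and the following hunks, radix_tree_exceptional_entry() is replaced by xa_is_value(), which tests the same low tag bit that marks non-pointer "value" entries such as shadow and DAX entries. A rough, self-contained illustration of the value-entry encoding, based on the generic XArray helpers rather than on anything in this patch:

#include <linux/printk.h>
#include <linux/xarray.h>

static void demo_value_entry(void)
{
	void *entry = xa_mk_value(42);	/* encode a small integer; sets the low bit */

	if (xa_is_value(entry))		/* the test the truncate paths now perform */
		pr_info("value entry decodes to %lu\n", xa_to_value(entry));
}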
@@ -347,7 +344,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        if (index >= end)
                                break;
 
-                       if (radix_tree_exceptional_entry(page))
+                       if (xa_is_value(page))
                                continue;
 
                        if (!trylock_page(page))
@@ -442,7 +439,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
                        }
 
-                       if (radix_tree_exceptional_entry(page))
+                       if (xa_is_value(page))
                                continue;
 
                        lock_page(page);
@@ -561,7 +558,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                        if (index > end)
                                break;
 
-                       if (radix_tree_exceptional_entry(page)) {
+                       if (xa_is_value(page)) {
                                invalidate_exceptional_entry(mapping, index,
                                                             page);
                                continue;
@@ -692,7 +689,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                        if (index > end)
                                break;
 
-                       if (radix_tree_exceptional_entry(page)) {
+                       if (xa_is_value(page)) {
                                if (!invalidate_exceptional_entry2(mapping,
                                                                   index, page))
                                        ret = -EBUSY;
@@ -738,10 +735,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                index++;
        }
        /*
-        * For DAX we invalidate page tables after invalidating radix tree.  We
+        * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry however
         * that would be expensive. And doing range unmapping before doesn't
-        * work as we have no cheap way to find whether radix tree entry didn't
+        * work as we have no cheap way to find whether page cache entry didn't
         * get remapped later.
         */
        if (dax_mapping(mapping)) {
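
The hunk ends before the body of the dax_mapping() branch, but the updated comment describes the ordering it implements: page cache entries are invalidated in the loop above, and only afterwards are the user page tables for the whole range torn down. A hedged sketch of that final step, using the long-standing unmap_mapping_range() interface (the call actually used in the tree may differ):

	/*
	 * Illustrative sketch of the range unmap the comment describes;
	 * start and end are the pgoff_t arguments of
	 * invalidate_inode_pages2_range().
	 */
	if (dax_mapping(mapping))
		unmap_mapping_range(mapping,
				    (loff_t)start << PAGE_SHIFT,
				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);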