mm: move more expensive part of XA setup out of mapping check
authorJens Axboe <axboe@kernel.dk>
Fri, 5 Nov 2021 20:37:13 +0000 (13:37 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Nov 2021 20:30:34 +0000 (13:30 -0700)
The fast path here is the case where no writeback is needed at all, yet
we spend time setting up the xarray lookup data upfront.  Move the part
that actually needs to iterate the address space mapping into a separate
helper, saving ~30% of the time spent in this check.
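
A minimal sketch of the resulting structure, using illustrative stand-in
types and names rather than the kernel's: the cheap early returns stay in
the exported check, and the lookup state is only constructed inside the
helper once those checks pass.

	#include <stdbool.h>
	#include <stddef.h>

	struct lookup_state { long cursor; long index; };	/* stand-in for XA_STATE */
	struct mapping { size_t nrpages; bool dirty_tag; bool writeback_tag; };

	/* Slow path: the iteration state is only set up here. */
	static bool range_has_writeback(const struct mapping *m, long start, long end)
	{
		struct lookup_state state = { .cursor = start, .index = start };

		if (end < start)
			return false;
		/* ... walk [start, end] with 'state', stopping at a busy page ... */
		(void)m;
		(void)state;
		return false;
	}

	/* Fast path: cheap flag checks first; the helper runs only if they pass. */
	bool range_needs_writeback(const struct mapping *m, long start, long end)
	{
		if (!m->nrpages)
			return false;
		if (!m->dirty_tag && !m->writeback_tag)
			return false;
		return range_has_writeback(m, start, end);
	}

The state lives on the helper's stack frame, so the common "nothing to
write back" case never pays for initializing it.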

Link: https://lkml.kernel.org/r/49f67983-b802-8929-edab-d807f745c9ca@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/filemap.c

index 28c57c2536d0a1be8c1bdd4561616a367aa7814e..938f52702a7aba61a723ed4d0d5ae46e7b97a071 100644
@@ -639,6 +639,30 @@ static bool mapping_needs_writeback(struct address_space *mapping)
        return mapping->nrpages;
 }
 
+static bool filemap_range_has_writeback(struct address_space *mapping,
+                                       loff_t start_byte, loff_t end_byte)
+{
+       XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
+       pgoff_t max = end_byte >> PAGE_SHIFT;
+       struct page *page;
+
+       if (end_byte < start_byte)
+               return false;
+
+       rcu_read_lock();
+       xas_for_each(&xas, page, max) {
+               if (xas_retry(&xas, page))
+                       continue;
+               if (xa_is_value(page))
+                       continue;
+               if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
+                       break;
+       }
+       rcu_read_unlock();
+       return page != NULL;
+
+}
+
 /**
  * filemap_range_needs_writeback - check if range potentially needs writeback
  * @mapping:           address space within which to check
@@ -656,29 +680,12 @@ static bool mapping_needs_writeback(struct address_space *mapping)
 bool filemap_range_needs_writeback(struct address_space *mapping,
                                   loff_t start_byte, loff_t end_byte)
 {
-       XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
-       pgoff_t max = end_byte >> PAGE_SHIFT;
-       struct page *page;
-
        if (!mapping_needs_writeback(mapping))
                return false;
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                return false;
-       if (end_byte < start_byte)
-               return false;
-
-       rcu_read_lock();
-       xas_for_each(&xas, page, max) {
-               if (xas_retry(&xas, page))
-                       continue;
-               if (xa_is_value(page))
-                       continue;
-               if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
-                       break;
-       }
-       rcu_read_unlock();
-       return page != NULL;
+       return filemap_range_has_writeback(mapping, start_byte, end_byte);
 }
 EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);
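
As a usage illustration (a hypothetical caller sketch, not part of this
patch), a non-blocking read path might use the exported check to bail out
with -EAGAIN instead of waiting on writeback; after this change, the
common clean case returns before any xarray state is built.

	/* Assumes <linux/fs.h> and <linux/uio.h>; function name is made up. */
	static ssize_t example_nowait_read(struct kiocb *iocb, struct iov_iter *iter)
	{
		struct address_space *mapping = iocb->ki_filp->f_mapping;
		size_t count = iov_iter_count(iter);

		if (iocb->ki_flags & IOCB_NOWAIT) {
			/* Cheap check now; xarray walk only if the mapping is tagged. */
			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
							  iocb->ki_pos + count - 1))
				return -EAGAIN;
		}
		/* ... proceed with the read ... */
		return 0;
	}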