mm: replace xa_get_order with xas_get_order where appropriate
author	Shakeel Butt <shakeel.butt@linux.dev>
	Fri, 6 Sep 2024 23:05:12 +0000 (16:05 -0700)
committer	Andrew Morton <akpm@linux-foundation.org>
	Mon, 9 Sep 2024 23:39:17 +0000 (16:39 -0700)
Tracing of invalidation and truncation operations on large files showed
that xa_get_order() is among the top functions on which the kernel
spends a lot of CPU time.  xa_get_order() needs to traverse the tree to
reach the right node for a given index and then extract the order of
the entry.  However, at many call sites it is invoked from within an
already ongoing tree traversal, where there is no need to walk the tree
again.  Just use xas_get_order() at those places.
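
For illustration only (not part of the patch), a minimal sketch of the
pattern, assuming a typical xas_for_each() walk; "mapping",
"first_index" and "last_index" stand in for the caller's context:

	XA_STATE(xas, &mapping->i_pages, first_index);
	struct folio *folio;
	unsigned int order;

	rcu_read_lock();
	xas_for_each(&xas, folio, last_index) {
		if (xas_retry(&xas, folio))
			continue;
		/*
		 * xa_get_order(xas.xa, xas.xa_index) would walk the
		 * tree again from the root; xas_get_order() reads the
		 * order from the node the xa_state is already
		 * positioned on.
		 */
		order = xas_get_order(&xas);
		/* ... use order, e.g. nr_pages = 1 << order ... */
	}
	rcu_read_unlock();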

Link: https://lkml.kernel.org/r/20240906230512.124643-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c
mm/shmem.c

diff --git a/mm/filemap.c b/mm/filemap.c
index 7eb4637ea199ce0769fb563ba1a1cdaaa67cd0c0..6f14d80feb372ec6d8d169d28e3e61424fe08a47 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2112,7 +2112,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
                        VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
                                        folio);
                } else {
-                       nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
+                       nr = 1 << xas_get_order(&xas);
                        base = xas.xa_index & ~(nr - 1);
                        /* Omit order>0 value which begins before the start */
                        if (base < *start)
@@ -3001,7 +3001,7 @@ unlock:
 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
 {
        if (xa_is_value(folio))
-               return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
+               return PAGE_SIZE << xas_get_order(xas);
        return folio_size(folio);
 }
 
@@ -4297,7 +4297,7 @@ static void filemap_cachestat(struct address_space *mapping,
                if (xas_retry(&xas, folio))
                        continue;
 
-               order = xa_get_order(xas.xa, xas.xa_index);
+               order = xas_get_order(&xas);
                nr_pages = 1 << order;
                folio_first_index = round_down(xas.xa_index, 1 << order);
                folio_last_index = folio_first_index + nr_pages - 1;
diff --git a/mm/shmem.c b/mm/shmem.c
index 74f093d88c782f892b51c9086f15b5d6df5fd7db..361affdf399094248fb0e152dd472802185404ec 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -890,7 +890,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                if (xas_retry(&xas, page))
                        continue;
                if (xa_is_value(page))
-                       swapped += 1 << xa_get_order(xas.xa, xas.xa_index);
+                       swapped += 1 << xas_get_order(&xas);
                if (xas.xa_index == max)
                        break;
                if (need_resched()) {