mm/swap: optimise lru_add_drain_cpu()
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Author date: Fri, 17 Jun 2022 17:50:09 +0000 (18:50 +0100)
Committer: akpm <akpm@linux-foundation.org>
Commit date: Mon, 4 Jul 2022 01:08:46 +0000 (18:08 -0700)
Do the per-cpu dereferencing of the fbatches once which saves 14 bytes
of text and several percpu relocations.

Link: https://lkml.kernel.org/r/20220617175020.717127-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swap.c

index df78c4c4dbeb5627926674f2f3fd539dff28fa65..84318692db6a2a07fd14cd5a4e4406ca160f25bd 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,8 @@ static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
  */
 void lru_add_drain_cpu(int cpu)
 {
-       struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_add, cpu);
+       struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+       struct folio_batch *fbatch = &fbatches->lru_add;
 
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_add_fn);
@@ -636,15 +637,15 @@ void lru_add_drain_cpu(int cpu)
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
 
-       fbatch = &per_cpu(cpu_fbatches.lru_deactivate_file, cpu);
+       fbatch = &fbatches->lru_deactivate_file;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
 
-       fbatch = &per_cpu(cpu_fbatches.lru_deactivate, cpu);
+       fbatch = &fbatches->lru_deactivate;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_deactivate_fn);
 
-       fbatch = &per_cpu(cpu_fbatches.lru_lazyfree, cpu);
+       fbatch = &fbatches->lru_lazyfree;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_lazyfree_fn);