mm: mmu_gather: prepare to gather encoded page pointers with flags
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 9 Nov 2022 20:30:50 +0000 (12:30 -0800)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 30 Nov 2022 23:58:50 +0000 (15:58 -0800)
This is purely a preparatory patch that makes all the data structures
ready for encoding flags with the mmu_gather page pointers.

The code currently always sets the flag to zero and doesn't use it yet,
but the infrastructure now carries that type state alongside the page
pointer.  The next step will be to actually start using it.

Link: https://lkml.kernel.org/r/20221109203051.1835763-3-torvalds@linux-foundation.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/include/asm/tlb.h
include/asm-generic/tlb.h
include/linux/swap.h
mm/mmu_gather.c
mm/swap_state.c

index 3a5c8fb590e55ddfda72e6908ced1f4626e2c7c0..05142226d65dcc73ffa87cec72729f2d75da9656 100644 (file)
@@ -25,7 +25,8 @@
 void __tlb_remove_table(void *_table);
 static inline void tlb_flush(struct mmu_gather *tlb);
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size);
+                                         struct encoded_page *page,
+                                         int page_size);
 
 #define tlb_flush tlb_flush
 #define pte_free_tlb pte_free_tlb
@@ -42,9 +43,10 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
  * has already been freed, so just do free_page_and_swap_cache.
  */
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size)
+                                         struct encoded_page *page,
+                                         int page_size)
 {
-       free_page_and_swap_cache(page);
+       free_page_and_swap_cache(encoded_page_ptr(page));
        return false;
 }
 
index cab7cfebf40bd9c3aa666bb8d28fb3c1dc5c67d7..54d03d1e712eb484e81747d0b90eac677befcfc0 100644 (file)
@@ -246,7 +246,7 @@ struct mmu_gather_batch {
        struct mmu_gather_batch *next;
        unsigned int            nr;
        unsigned int            max;
-       struct page             *pages[];
+       struct encoded_page     *encoded_pages[];
 };
 
 #define MAX_GATHER_BATCH       \
@@ -260,7 +260,8 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
 
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
+                                  struct encoded_page *page,
                                   int page_size);
 #endif
 
@@ -435,13 +436,13 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
 {
-       if (__tlb_remove_page_size(tlb, page, page_size))
+       if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
                tlb_flush_mmu(tlb);
 }
 
 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-       return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+       return __tlb_remove_page_size(tlb, encode_page(page, 0), PAGE_SIZE);
 }
 
 /* tlb_remove_page
index fec6647a289af71aace55de0ebc2292bec89f8ce..b61e2007d156860f471f678ea85d80069b5378fd 100644 (file)
@@ -463,7 +463,7 @@ static inline unsigned long total_swapcache_pages(void)
 
 extern void free_swap_cache(struct page *page);
 extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache(struct encoded_page **, int);
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
index 3a2c3f8cad2fe8f097fe9352967c7b163d893ebe..382581c4a9f68893184946dd8aa485ed5a26cd78 100644 (file)
@@ -48,7 +48,7 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
        struct mmu_gather_batch *batch;
 
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-               struct page **pages = batch->pages;
+               struct encoded_page **pages = batch->encoded_pages;
 
                do {
                        /*
@@ -77,7 +77,7 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
        tlb->local.next = NULL;
 }
 
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
 {
        struct mmu_gather_batch *batch;
 
@@ -92,13 +92,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
         * Add the page and check if we are full. If so
         * force a flush.
         */
-       batch->pages[batch->nr++] = page;
+       batch->encoded_pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
-       VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+       VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));
 
        return false;
 }
index 40fe6f23e10578026b14a91b7d19a372a932d3d7..2927507b43d8192f876624199cb077cce7139242 100644 (file)
@@ -303,15 +303,12 @@ void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them.  They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
 {
-       struct page **pagep = pages;
-       int i;
-
        lru_add_drain();
-       for (i = 0; i < nr; i++)
-               free_swap_cache(pagep[i]);
-       release_pages(pagep, nr);
+       for (int i = 0; i < nr; i++)
+               free_swap_cache(encoded_page_ptr(pages[i]));
+       release_pages(pages, nr);
 }
 
 static inline bool swap_use_vma_readahead(void)