mm/rmap: use folio->_mapcount for small folios
author: David Hildenbrand <david@redhat.com>
Fri, 16 Aug 2024 10:32:46 +0000 (12:32 +0200)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:37 +0000 (21:15 -0700)
We have some cases left whereby we operate on small folios and still refer
to page->_mapcount.  Let's just use folio->_mapcount instead, which
currently still overlays page->_mapcount, so no change.

This change will make it easier to later spot any remaining users of
page->_mapcount that target tail pages.

Link: https://lkml.kernel.org/r/20240816103246.719209-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/rmap.h
mm/rmap.c

index 0978c64f49d833f19dd2559ab3042c243708300c..91b5935e8485e7d5023425ed7535405178e9fa7a 100644 (file)
@@ -331,7 +331,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
        switch (level) {
        case RMAP_LEVEL_PTE:
                if (!folio_test_large(folio)) {
-                       atomic_inc(&page->_mapcount);
+                       atomic_inc(&folio->_mapcount);
                        break;
                }
 
@@ -425,7 +425,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
                if (!folio_test_large(folio)) {
                        if (PageAnonExclusive(page))
                                ClearPageAnonExclusive(page);
-                       atomic_inc(&page->_mapcount);
+                       atomic_inc(&folio->_mapcount);
                        break;
                }
 
index a6b9cd0b2b18ed3461bdbb6c8269f41a771144b2..1103a536e474065cedcaabb5bd30f72134e4bbc1 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1165,7 +1165,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
        switch (level) {
        case RMAP_LEVEL_PTE:
                if (!folio_test_large(folio)) {
-                       nr = atomic_inc_and_test(&page->_mapcount);
+                       nr = atomic_inc_and_test(&folio->_mapcount);
                        break;
                }
 
@@ -1535,7 +1535,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
        switch (level) {
        case RMAP_LEVEL_PTE:
                if (!folio_test_large(folio)) {
-                       nr = atomic_add_negative(-1, &page->_mapcount);
+                       nr = atomic_add_negative(-1, &folio->_mapcount);
                        break;
                }