x86/mm/pat: factor out setting cachemode into pgprot_set_cachemode()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Mon, 12 May 2025 12:34:14 +0000 (14:34 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Thu, 22 May 2025 21:55:36 +0000 (14:55 -0700)
VM_PAT annoyed me too much and wasted too much of my time; let's clean up
PAT handling and remove VM_PAT.

This should sort out various issues with VM_PAT we discovered recently,
and will hopefully make the whole code more stable and easier to maintain.

In essence: we stop letting PAT mode mess with VMAs and instead lift what
to track/untrack to the MM core.  We remember per VMA which pfn range we
tracked in a new struct we attach to a VMA (we have space without
exceeding 192 bytes), use a kref to share it among VMAs during
split/mremap/fork, and automatically untrack once the kref drops to 0.
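To make the idea concrete, here is a minimal sketch of such a per-VMA
tracking context.  All names, fields and the untrack hook below are
illustrative assumptions, not the exact code this series introduces:

	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical arch hook that drops the PAT reservation. */
	void pfnmap_untrack(unsigned long pfn, unsigned long size);

	/* Illustrative: one context, shared by all VMAs covering it. */
	struct pfnmap_track_ctx {
		struct kref kref;	/* taken on VMA split/mremap/fork */
		unsigned long pfn;	/* start of the tracked pfn range */
		unsigned long size;	/* size of the tracked range in bytes */
	};

	static void pfnmap_track_ctx_release(struct kref *kref)
	{
		struct pfnmap_track_ctx *ctx =
			container_of(kref, struct pfnmap_track_ctx, kref);

		/* Last VMA reference gone: untrack the full pfn range. */
		pfnmap_untrack(ctx->pfn, ctx->size);
		kfree(ctx);
	}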

This implies that we'll keep tracking a full pfn range even after
partially unmapping it, until fully unmapping it; but as that case was
mostly broken before, this at least makes it work in a way that is least
intrusive to VMA handling.

Shrinking with mremap() used to work in a hacky way; now we'll similarly
keep the original pfn range tracked even after this form of partial unmap.
Does anybody care about that?  Unlikely.  If we run into issues, we could
likely handle that (adjust the tracking) when our kref drops to 1 while
freeing a VMA.  But that adds more complexity, so avoid it for now.

Briefly tested with the new pfnmap selftests [1].

This patch (of 11):

Let's factor it out to make the code easier to grasp.  Drop one comment
where it is now rather obvious what is happening.

Also use it in pgprot_writecombine()/pgprot_writethrough(), where clearing
the old cachemode might not strictly be required; but given that we are
already doing a function call, there is no need to care about this
micro-optimization.
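In other words, the helper replaces the cache-mode bits instead of merely
OR-ing new ones on top (snippet mirroring the diff below, using WC as the
example):

	/* Before: stale cache-mode bits in @prot survive the OR. */
	prot = __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));

	/* After: clear _PAGE_CACHE_MASK first, then set the new mode. */
	prot = __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));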

Link: https://lkml.kernel.org/r/20250512123424.637989-1-david@redhat.com
Link: https://lkml.kernel.org/r/20250512123424.637989-2-david@redhat.com
Link: https://lkml.kernel.org/r/20250509153033.952746-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Ingo Molnar <mingo@kernel.org> [x86 bits]
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 72d8cbc611583228bc86333ca1b128c47e7eefec..edec5859651d6acb705b641013ae172a0a22bbf1 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -800,6 +800,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif /* CONFIG_STRICT_DEVMEM */
 
+static inline void pgprot_set_cachemode(pgprot_t *prot, enum page_cache_mode pcm)
+{
+       *prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
+                        cachemode2protval(pcm));
+}
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
 {
@@ -811,8 +817,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
        if (file->f_flags & O_DSYNC)
                pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-       *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-                            cachemode2protval(pcm));
+       pgprot_set_cachemode(vma_prot, pcm);
        return 1;
 }
 
@@ -880,9 +885,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
                                cattr_name(pcm));
-                       *vma_prot = __pgprot((pgprot_val(*vma_prot) &
-                                            (~_PAGE_CACHE_MASK)) |
-                                            cachemode2protval(pcm));
+                       pgprot_set_cachemode(vma_prot, pcm);
                }
                return 0;
        }
@@ -907,9 +910,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
-               *vma_prot = __pgprot((pgprot_val(*vma_prot) &
-                                     (~_PAGE_CACHE_MASK)) |
-                                    cachemode2protval(pcm));
+               pgprot_set_cachemode(vma_prot, pcm);
        }
 
        if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
@@ -1060,9 +1061,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                        return -EINVAL;
        }
 
-       *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
-                        cachemode2protval(pcm));
-
+       pgprot_set_cachemode(prot, pcm);
        return 0;
 }
 
@@ -1073,10 +1072,8 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
        if (!pat_enabled())
                return;
 
-       /* Set prot based on lookup */
        pcm = lookup_memtype(pfn_t_to_phys(pfn));
-       *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
-                        cachemode2protval(pcm));
+       pgprot_set_cachemode(prot, pcm);
 }
 
 /*
@@ -1115,15 +1112,15 @@ void untrack_pfn_clear(struct vm_area_struct *vma)
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
-       return __pgprot(pgprot_val(prot) |
-                               cachemode2protval(_PAGE_CACHE_MODE_WC));
+       pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC);
+       return prot;
 }
 EXPORT_SYMBOL_GPL(pgprot_writecombine);
 
 pgprot_t pgprot_writethrough(pgprot_t prot)
 {
-       return __pgprot(pgprot_val(prot) |
-                               cachemode2protval(_PAGE_CACHE_MODE_WT));
+       pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WT);
+       return prot;
 }
 EXPORT_SYMBOL_GPL(pgprot_writethrough);