From: David Hildenbrand
Date: Mon, 12 May 2025 12:34:14 +0000 (+0200)
Subject: x86/mm/pat: factor out setting cachemode into pgprot_set_cachemode()
X-Git-Tag: v6.16-rc1~92^2~37
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=ed1a7814036c60ced6198548558342ef75af3ad8;p=linux-block.git

x86/mm/pat: factor out setting cachemode into pgprot_set_cachemode()

VM_PAT annoyed me too much and wasted too much of my time; let's clean
PAT handling up and remove VM_PAT.

This should sort out various issues with VM_PAT we discovered recently,
and will hopefully make the whole code more stable and easier to
maintain.

In essence: we stop letting PAT mode mess with VMAs and instead lift
what to track/untrack to the MM core.  We remember per VMA which pfn
range we tracked in a new struct we attach to a VMA (we have space
without exceeding 192 bytes), use a kref to share it among VMAs during
split/mremap/fork, and automatically untrack once the kref drops to 0.

This implies that we'll keep tracking a full pfn range even after
partially unmapping it, until fully unmapping it; but as that case was
mostly broken before, this at least makes it work in a way that is
least intrusive to VMA handling.

Shrinking with mremap() used to work in a hacky way; now we'll
similarly keep the original pfn range tracked even after this form of
partial unmap.  Does anybody care about that?  Unlikely.  If we run
into issues, we could likely handle that (adjust the tracking) when our
kref drops to 1 while freeing a VMA.  But it adds more complexity, so
avoid that for now.

Briefly tested with the new pfnmap selftests [1].

This patch (of 11):

Let's factor it out to make the code easier to grasp.  Drop one comment
where it is now rather obvious what is happening.

Use it also in pgprot_writecombine()/pgprot_writethrough(), where
clearing the old cachemode might not be required, but given that we are
already doing a function call, no need to care about this
micro-optimization.

Link: https://lkml.kernel.org/r/20250512123424.637989-1-david@redhat.com
Link: https://lkml.kernel.org/r/20250512123424.637989-2-david@redhat.com
Link: https://lkml.kernel.org/r/20250509153033.952746-1-david@redhat.com [1]
Signed-off-by: David Hildenbrand
Reviewed-by: Lorenzo Stoakes
Acked-by: Ingo Molnar [x86 bits]
Reviewed-by: Liam R. Howlett
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Airlie
Cc: David Hildenbrand
Cc: "H. Peter Anvin"
Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 72d8cbc61158..edec5859651d 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -800,6 +800,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) } #endif /* CONFIG_STRICT_DEVMEM */ +static inline void pgprot_set_cachemode(pgprot_t *prot, enum page_cache_mode pcm) +{ + *prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) | + cachemode2protval(pcm)); +} + int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { @@ -811,8 +817,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, if (file->f_flags & O_DSYNC) pcm = _PAGE_CACHE_MODE_UC_MINUS; - *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); return 1; } @@ -880,9 +885,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); - *vma_prot = __pgprot((pgprot_val(*vma_prot) & - (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); } return 0; } @@ -907,9 +910,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, * We allow returning different type than the one requested in * non strict case. */ - *vma_prot = __pgprot((pgprot_val(*vma_prot) & - (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); } if (memtype_kernel_map_sync(paddr, size, pcm) < 0) { @@ -1060,9 +1061,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, return -EINVAL; } - *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); - + pgprot_set_cachemode(prot, pcm); return 0; } @@ -1073,10 +1072,8 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) if (!pat_enabled()) return; - /* Set prot based on lookup */ pcm = lookup_memtype(pfn_t_to_phys(pfn)); - *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(prot, pcm); } /* @@ -1115,15 +1112,15 @@ void untrack_pfn_clear(struct vm_area_struct *vma) pgprot_t pgprot_writecombine(pgprot_t prot) { - return __pgprot(pgprot_val(prot) | - cachemode2protval(_PAGE_CACHE_MODE_WC)); + pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC); + return prot; } EXPORT_SYMBOL_GPL(pgprot_writecombine); pgprot_t pgprot_writethrough(pgprot_t prot) { - return __pgprot(pgprot_val(prot) | - cachemode2protval(_PAGE_CACHE_MODE_WT)); + pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WT); + return prot; } EXPORT_SYMBOL_GPL(pgprot_writethrough);