From c519c3c0a1133c408e83a383aa4dd30010aa5d71 Mon Sep 17 00:00:00 2001
From: Alexander Gordeev
Date: Mon, 18 Aug 2025 18:39:13 +0200
Subject: [PATCH] mm/kasan: avoid lazy MMU mode hazards

Functions __kasan_populate_vmalloc() and __kasan_depopulate_vmalloc()
use apply_to_pte_range(), which enters lazy MMU mode. In that mode PTE
updates may not become visible until the mode is left. That may lead to
a situation in which otherwise correct reads and writes of a PTE using
ptep_get(), set_pte(), pte_clear() and other access primitives produce
wrong results while the vmalloc shadow memory is being (de-)populated.

To avoid these hazards, leave lazy MMU mode before each PTE
manipulation and re-enter it afterwards.

Link: https://lkml.kernel.org/r/0d2efb7ddddbff6b288fbffeeb10166e90771718.1755528662.git.agordeev@linux.ibm.com
Fixes: 3c5c3cfb9ef4 ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Alexander Gordeev
Cc: Andrey Ryabinin
Cc: Daniel Axtens
Cc: Mark Rutland
Cc: Ryan Roberts
Signed-off-by: Andrew Morton
---
 mm/kasan/shadow.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 4d846d146d02..e2ceebf737ef 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -305,6 +305,8 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	pte_t pte;
 	int index;
 
+	arch_leave_lazy_mmu_mode();
+
 	index = PFN_DOWN(addr - data->start);
 	page = data->pages[index];
 	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
@@ -317,6 +319,8 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	}
 	spin_unlock(&init_mm.page_table_lock);
 
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
@@ -461,6 +465,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	pte_t pte;
 	int none;
 
+	arch_leave_lazy_mmu_mode();
+
 	spin_lock(&init_mm.page_table_lock);
 	pte = ptep_get(ptep);
 	none = pte_none(pte);
@@ -471,6 +477,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	if (likely(!none))
 		__free_page(pfn_to_page(pte_pfn(pte)));
 
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
-- 
2.25.1
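
For readers unfamiliar with the pattern, the sketch below shows the general shape of an
apply_to_pte_range() callback with the same leave/re-enter bracketing applied. It is a
minimal illustrative sketch, not the patched kernel code: example_shadow_pte() and the
caller-prepared PTE value it consumes are hypothetical simplifications; only the kernel
interfaces it calls (arch_leave_lazy_mmu_mode(), arch_enter_lazy_mmu_mode(), ptep_get(),
pte_none(), set_pte_at(), init_mm, apply_to_page_range()) are real.

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: apply_to_pte_range() invokes this callback inside
 * lazy MMU mode, where PTE updates may be batched by the architecture.
 * Leaving the mode around the direct PTE access lets ptep_get() observe,
 * and set_pte_at() publish, up-to-date values; re-entering keeps the
 * remainder of the page-table walk batched.
 */
static int example_shadow_pte(pte_t *ptep, unsigned long addr, void *data)
{
	pte_t new_pte = *(pte_t *)data;	/* PTE value prepared by the caller */

	arch_leave_lazy_mmu_mode();

	spin_lock(&init_mm.page_table_lock);
	if (pte_none(ptep_get(ptep)))
		set_pte_at(&init_mm, addr, ptep, new_pte);
	spin_unlock(&init_mm.page_table_lock);

	arch_enter_lazy_mmu_mode();

	return 0;
}

/* e.g.: apply_to_page_range(&init_mm, start, size, example_shadow_pte, &new_pte); */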