mm/vmalloc: leave lazy MMU mode on PTE mapping error
Author:     Alexander Gordeev <agordeev@linux.ibm.com>
AuthorDate: Mon, 23 Jun 2025 07:57:21 +0000 (09:57 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Thu, 10 Jul 2025 04:07:52 +0000 (21:07 -0700)
vmap_pages_pte_range() enters lazy MMU mode via arch_enter_lazy_mmu_mode(),
but the early returns on its error paths skip the matching
arch_leave_lazy_mmu_mode() call, leaving the mode entered.  Record the
error and break out of the mapping loop instead, so the mode is always
left before returning.
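
For illustration, the same pattern in a minimal stand-alone C sketch: the
enter_lazy_mmu()/leave_lazy_mmu() stubs and map_entries() below are
hypothetical stand-ins for arch_enter_lazy_mmu_mode()/
arch_leave_lazy_mmu_mode() and the real mapping loop.  An early return
from inside the loop skips the paired leave call; breaking out and falling
through to a single exit does not:

	#include <stdio.h>

	/* Hypothetical stand-ins for arch_enter/leave_lazy_mmu_mode(). */
	static void enter_lazy_mmu(void) { puts("enter lazy MMU"); }
	static void leave_lazy_mmu(void) { puts("leave lazy MMU"); }

	/* Map n entries; a NULL entry is an error, as with WARN_ON(!page). */
	static int map_entries(void **entries, int n)
	{
		int err = 0;
		int i;

		enter_lazy_mmu();
		for (i = 0; i < n; i++) {
			if (!entries[i]) {
				err = -1;	/* a "return -1;" here would leak the mode */
				break;
			}
			/* ... install the mapping for entries[i] ... */
		}
		leave_lazy_mmu();	/* reached on success and on error alike */

		return err;
	}

	int main(void)
	{
		void *good[2] = { (void *)1, (void *)2 };
		void *bad[2]  = { (void *)1, NULL };

		printf("good: %d\n", map_entries(good, 2));
		printf("bad:  %d\n", map_entries(bad, 2));
		return 0;
	}

Note that with the fix, *mask is still updated on the error path; PTEs may
already have been modified before the failing iteration, so reporting
PGTBL_PTE_MODIFIED remains correct.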

Link: https://lkml.kernel.org/r/20250623075721.2817094-1-agordeev@linux.ibm.com
Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/202506132017.T1l1l6ME-lkp@intel.com/
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ab986dd09b6aa1d0660f01a09ff5d4f1f8c424be..6dbcdceecae13484c764e8fab3a86769a0a7064e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -514,6 +514,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
 {
+       int err = 0;
        pte_t *pte;
 
        /*
@@ -530,12 +531,18 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
        do {
                struct page *page = pages[*nr];
 
-               if (WARN_ON(!pte_none(ptep_get(pte))))
-                       return -EBUSY;
-               if (WARN_ON(!page))
-                       return -ENOMEM;
-               if (WARN_ON(!pfn_valid(page_to_pfn(page))))
-                       return -EINVAL;
+               if (WARN_ON(!pte_none(ptep_get(pte)))) {
+                       err = -EBUSY;
+                       break;
+               }
+               if (WARN_ON(!page)) {
+                       err = -ENOMEM;
+                       break;
+               }
+               if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
+                       err = -EINVAL;
+                       break;
+               }
 
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
@@ -543,7 +550,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 
        arch_leave_lazy_mmu_mode();
        *mask |= PGTBL_PTE_MODIFIED;
-       return 0;
+
+       return err;
 }
 
 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,