x86/vmemmap: drop handling of 4K unaligned vmemmap range
author Oscar Salvador <osalvador@suse.de>
Fri, 30 Apr 2021 05:57:12 +0000 (22:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Apr 2021 18:20:38 +0000 (11:20 -0700)
Patch series "Cleanup and fixups for vmemmap handling", v6.

This series contains cleanups to remove dead code that handles unaligned
cases for 4K and 1GB pages (patch#1 and patch#2) when removing the vmemmap
range, and a fix (patch#3) to handle the case when two vmemmap ranges
intersect the same PMD.

This patch (of 4):

remove_pte_table() is prepared to handle the case where either the start
or the end of the range is not PAGE aligned.  This cannot actually happen:

__populate_section_memmap enforces the range to be PMD aligned, so as long
as the size of struct page remains a multiple of 8, the vmemmap range
will be aligned to PAGE_SIZE.
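
As a reference, a minimal user-space sketch of that arithmetic (the
constants below are local stand-ins for the x86-64 values, and the loop
variable stands in for sizeof(struct page); this is illustration only,
not kernel code):

/*
 * __populate_section_memmap() only accepts subsection-aligned (2M,
 * i.e. PMD-sized) pfn ranges, so the vmemmap span of a hot-(un)plugged
 * range is always a multiple of PAGES_PER_SUBSECTION * sizeof(struct page).
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		12	/* 4K base pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define SUBSECTION_SHIFT	21	/* 2M subsections */
#define PAGES_PER_SUBSECTION	(1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))

int main(void)
{
	for (unsigned long sz = 8; sz <= 128; sz += 8) {
		unsigned long span = PAGES_PER_SUBSECTION * sz;

		/* 512 pfns times a multiple of 8 bytes is always 4K aligned. */
		assert(span % PAGE_SIZE == 0);
		printf("struct page = %3lu bytes -> vmemmap span %6lu bytes\n",
		       sz, span);
	}
	return 0;
}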

Drop the dead code and place a VM_BUG_ON in vmemmap_{populate,free} to
catch nasty cases.  Note that the VM_BUG_ON is placed in there because
vmemmap_{populate,free} is the gate for all the page-table removal and
freeing logic.

Link: https://lkml.kernel.org/r/20210309214050.4674-1-osalvador@suse.de
Link: https://lkml.kernel.org/r/20210309214050.4674-2-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Suggested-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/init_64.c

index 55247451ba85bdc4b01ac6fd96395d0203ce8462..ff312a87e58d8892952273865739cbab0aa399ee 100644 (file)
@@ -962,7 +962,6 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 {
        unsigned long next, pages = 0;
        pte_t *pte;
-       void *page_addr;
        phys_addr_t phys_addr;
 
        pte = pte_start + pte_index(addr);
@@ -983,42 +982,15 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;
 
-               if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
-                       /*
-                        * Do not free direct mapping pages since they were
-                        * freed when offlining, or simply not in use.
-                        */
-                       if (!direct)
-                               free_pagetable(pte_page(*pte), 0);
-
-                       spin_lock(&init_mm.page_table_lock);
-                       pte_clear(&init_mm, addr, pte);
-                       spin_unlock(&init_mm.page_table_lock);
+               if (!direct)
+                       free_pagetable(pte_page(*pte), 0);
 
-                       /* For non-direct mapping, pages means nothing. */
-                       pages++;
-               } else {
-                       /*
-                        * If we are here, we are freeing vmemmap pages since
-                        * direct mapped memory ranges to be freed are aligned.
-                        *
-                        * If we are not removing the whole page, it means
-                        * other page structs in this page are being used and
-                        * we cannot remove them. So fill the unused page_structs
-                        * with 0xFD, and remove the page when it is wholly
-                        * filled with 0xFD.
-                        */
-                       memset((void *)addr, PAGE_INUSE, next - addr);
-
-                       page_addr = page_address(pte_page(*pte));
-                       if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-                               free_pagetable(pte_page(*pte), 0);
+               spin_lock(&init_mm.page_table_lock);
+               pte_clear(&init_mm, addr, pte);
+               spin_unlock(&init_mm.page_table_lock);
 
-                               spin_lock(&init_mm.page_table_lock);
-                               pte_clear(&init_mm, addr, pte);
-                               spin_unlock(&init_mm.page_table_lock);
-                       }
-               }
+               /* For non-direct mapping, pages means nothing. */
+               pages++;
        }
 
        /* Call free_pte_table() in remove_pmd_table(). */
@@ -1197,6 +1169,9 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
 {
+       VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+       VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
        remove_pagetable(start, end, false, altmap);
 }
 
@@ -1556,6 +1531,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
        int err;
 
+       VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+       VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
        if (end - start < PAGES_PER_SECTION * sizeof(struct page))
                err = vmemmap_populate_basepages(start, end, node, NULL);
        else if (boot_cpu_has(X86_FEATURE_PSE))