Currently, the implementation of mem_init() in every architecture consists
of one or more of the following:
* initializations that must run before the page allocator is active, for
instance swiotlb_init()
* a call to memblock_free_all() to release all the memory to the buddy
allocator
* initializations that must run after the page allocator is ready, for
which there is no arch-specific hook other than mem_init(), for example
register_page_bootmem_info() on x86 and sparc64, or simply setting
mem_init_done = 1 on several architectures
* a bunch of semi-related stuff that apparently had no better place to
live, for example a ton of BUILD_BUG_ON()s in parisc.
Introduce arch_mm_preinit(), which will be the first thing called from
mm_core_init(). On architectures that have initializations that must happen
before the page allocator is ready, move those into arch_mm_preinit() along
with any code that does not depend on ordering with page allocator setup.
On several architectures this reduces mem_init() to a single call to
memblock_free_all(), which allows consolidating it next.
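
As an illustration, on a hypothetical architecture whose mem_init() mixes
all three kinds of work, the split looks roughly like this (a sketch, not
taken from any real architecture; the mem_init_done flag stands in for
arch-private post-allocator state):

	/* before: one catch-all hook */
	void __init mem_init(void)
	{
		swiotlb_init(true, SWIOTLB_VERBOSE);	/* must run before the page allocator */
		memblock_free_all();		/* release memory to the buddy allocator */
		mem_init_done = 1;		/* post-allocator bookkeeping */
	}

	/* after: the early work moves to the new hook ... */
	void __init arch_mm_preinit(void)
	{
		swiotlb_init(true, SWIOTLB_VERBOSE);
	}

	/* ... and mem_init() shrinks towards a single memblock_free_all() */
	void __init mem_init(void)
	{
		memblock_free_all();
		mem_init_done = 1;
	}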
Link: https://lkml.kernel.org/r/20250313135003.836600-13-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com> [x86]
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Guo Ren (csky) <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
free_area_init(max_zone_pfn);
}
-static void __init highmem_init(void)
+void __init arch_mm_preinit(void)
{
#ifdef CONFIG_HIGHMEM
memblock_phys_free(high_mem_start, high_mem_sz);
#endif
+
+ BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
}
/*
*/
void __init mem_init(void)
{
- highmem_init();
memblock_free_all();
-
- BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
- BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
- BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
- BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
}
#ifdef CONFIG_HIGHMEM
*p++ = 0xe7fddef0;
}
-/*
- * mem_init() marks the free areas in the mem_map and tells us how much
- * memory is free. This is done after various parts of the system have
- * claimed their memory after the kernel image.
- */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
#ifdef CONFIG_ARM_LPAE
swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
- /* this will put all unused low memory onto the freelists */
- memblock_free_all();
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
#endif
}
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ /* this will put all unused low memory onto the freelists */
+ memblock_free_all();
+}
+
#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
const char *name;
memblock_dump_all();
}
-/*
- * mem_init() marks the free areas in the mem_map and tells us how much memory
- * is free. This is done after various parts of the system have claimed their
- * memory after the kernel image.
- */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
unsigned int flags = SWIOTLB_VERBOSE;
bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
swiotlb_init(swiotlb, flags);
swiotlb_update_mem_attributes();
- /* this will put all unused low memory onto the freelists */
- memblock_free_all();
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
}
}
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much memory
+ * is free. This is done after various parts of the system have claimed their
+ * memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ /* this will put all unused low memory onto the freelists */
+ memblock_free_all();
+}
+
void free_initmem(void)
{
void *lm_init_begin = lm_alias(__init_begin);
static struct kcore_list kcore_kseg0;
#endif
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/*
* When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
maar_init();
setup_zero_pages(); /* Setup zeroed pages. */
- memblock_free_all();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
#endif
}
#else /* CONFIG_NUMA */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
setup_zero_pages(); /* This comes from node 0 */
- memblock_free_all();
}
#endif /* !CONFIG_NUMA */
+void __init mem_init(void)
+{
+ memblock_free_all();
+}
+
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
unsigned long pfn;
mark_nonram_nosave();
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/*
* book3s is limited to 16 page sizes due to encoding this in
kasan_late_init();
- memblock_free_all();
-
#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
/*
* If smp is enabled, next_tlbcam_idx is initialized in the cpu up
#endif /* CONFIG_PPC32 */
}
+void __init mem_init(void)
+{
+ memblock_free_all();
+}
+
void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);
#ifdef CONFIG_FLATMEM
}
swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
- memblock_free_all();
print_vm_layout();
}
+void __init mem_init(void)
+{
+ memblock_free_all();
+}
+
/* Limit the memory size via mem. */
static phys_addr_t memory_limit;
#ifdef CONFIG_XIP_KERNEL
swiotlb_update_mem_attributes();
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
kfence_split_mapping();
setup_zero_pages(); /* Setup zeroed pages. */
+}
+void __init mem_init(void)
+{
/* this will put all low memory onto the freelists */
memblock_free_all();
}
}
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
int i;
memset(sparc_valid_addr_bitmap, 0, i << 2);
taint_real_pages();
+}
+void __init mem_init(void)
+{
memblock_free_all();
}
/* Used during early boot */
static unsigned long brk_end;
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
+ max_pfn = max_low_pfn;
+}
+void __init mem_init(void)
+{
/* this will put all low memory onto the freelists */
memblock_free_all();
- max_pfn = max_low_pfn;
kmalloc_ok = 1;
}
panic("Linux doesn't support CPUs with broken WP.");
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
pci_iommu_alloc();
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif
+}
+
+void __init mem_init(void)
+{
/* this will put all low memory onto the freelists */
memblock_free_all();
panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
pci_iommu_alloc();
+}
+void __init mem_init(void)
+{
/* clear_bss() already clear the empty_zero_page */
/* this will put all memory onto the freelists */
extern int sysctl_page_lock_unfairness;
+void arch_mm_preinit(void);
void mm_core_init(void);
void init_mm_internals(void);
);
}
+void __init __weak arch_mm_preinit(void)
+{
+}
+
/*
* Set up kernel memory allocators
*/
void __init mm_core_init(void)
{
+ arch_mm_preinit();
hugetlb_bootmem_alloc();
/* Initializations relying on SMP setup */
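
For context, the empty __weak definition of arch_mm_preinit() above is what
lets architectures opt in without #ifdefs: a strong definition in arch code
overrides the weak generic stub at link time. A minimal userspace sketch of
the same pattern (arch_hook() and the file names are illustrative):

	/* default.c: generic fallback, analogous to the __weak stub */
	__attribute__((weak)) void arch_hook(void)
	{
	}

	/* arch.c: a strong definition silently replaces the weak one */
	#include <stdio.h>

	void arch_hook(void)
	{
		puts("arch-specific early init");
	}

	/* main.c: the caller needs no conditional compilation */
	void arch_hook(void);

	int main(void)
	{
		arch_hook();	/* runs the strong definition when linked in */
		return 0;
	}

Built with "cc default.c arch.c main.c", the strong arch_hook() wins; drop
arch.c from the link and the empty weak default is used instead.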