high_memory defines the upper bound on directly mapped memory.  This
bound is the beginning of ZONE_HIGHMEM when a system has high memory,
and the end of memory otherwise.

All of this is known to the generic memory management initialization
code, which can set high_memory while initializing core mm structures.

Add a generic calculation of high_memory to free_area_init() and remove
the per-architecture calculations, except for the architectures that set
and use high_memory earlier than that.
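
Nearly all of the removed per-architecture assignments compute the same
value; a condensed sketch of the common pattern (the exact expressions
vary slightly per architecture, as the hunks below show):

	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

The generic replacement in free_area_init() derives the bound from
memblock instead, clamping it to the start of ZONE_HIGHMEM when the
system has high memory:

	high_memory = phys_to_virt(memblock_end_of_DRAM() - 1) + 1;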
Link: https://lkml.kernel.org/r/20250313135003.836600-11-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com> [x86]
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Guo Ren (csky) <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
void __init
mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
}
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
- high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
-
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
#endif /* CONFIG_HIGHMEM */
}
early_init_fdt_scan_reserved_mem();
-
- high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
* initial kernel segment table's physical address.
*/
init_mm.context.ptbase = __pa(init_mm.pgd);
-
- /*
- * Start of high memory area. Will probably need something more
- * fancy if we... get more fancy.
- */
- high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}
#ifndef DMA_RESERVE
void __init mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
}
void __init mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
memblock_free_all();
}
#endif /* !CONFIG_NUMA */
void __init mem_init(void)
{
- high_memory = (void *)__va(memory_start + lowmem_size - 1);
-
/* this will put all memory onto the freelists */
memblock_free_all();
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
}
#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
free_area_init(max_zone_pfns);
}
#else /* CONFIG_NUMA */
void __init mem_init(void)
{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
setup_zero_pages(); /* This comes from node 0 */
memblock_free_all();
}
void __init mem_init(void)
{
- unsigned long end_mem = memory_end; /* this must not include
- kernel stack at top */
-
- end_mem &= PAGE_MASK;
- high_memory = __va(end_mem);
-
/* this will put all memory onto the freelists */
memblock_free_all();
}
{
BUG_ON(!mem_map);
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif
- high_memory = __va((max_pfn << PAGE_SHIFT));
memblock_free_all();
#ifdef CONFIG_PA11
phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
- high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
pv_init();
kfence_split_mapping();
void __init mem_init(void)
{
- pg_data_t *pgdat;
-
- high_memory = NULL;
- for_each_online_pgdat(pgdat)
- high_memory = max_t(void *, high_memory,
- __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
-
memblock_free_all();
/* Set this up early, so we can take care of the zero page */
taint_real_pages();
- high_memory = __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
}
void __init mem_init(void)
{
- high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
memblock_free_all();
/*
high_physmem = uml_physmem + physmem_size;
end_iomem = high_physmem + iomem_size;
- high_memory = (void *) end_iomem;
start_vm = VMALLOC_START;
max_low_pfn = e820__end_of_low_ram_pfn();
else
max_low_pfn = max_pfn;
-
- high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
/* Find and reserve MPTABLE area */
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
pages_to_mb(max_low_pfn));
{
free_highpages();
- high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
-
memblock_free_all();
}
return pte_marker_uffd_wp(vmf->orig_pte);
}
-/*
- * A number of key systems in x86 including ioremap() rely on the assumption
- * that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL.
- */
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
-
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
EXPORT_SYMBOL(mem_map);
#endif
+/*
+ * high_memory defines the upper bound on direct map memory, the end
+ * of ZONE_NORMAL.
+ */
+void *high_memory;
+EXPORT_SYMBOL(high_memory);
+
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
+static void set_high_memory(void)
+{
+ phys_addr_t highmem = memblock_end_of_DRAM();
+
+ /*
+ * Some architectures (e.g. ARM) set high_memory very early and
+ * use it in arch setup code.
+ * If an architecture has already set high_memory, don't overwrite it.
+ */
+ if (high_memory)
+ return;
+
+#ifdef CONFIG_HIGHMEM
+ if (arch_has_descending_max_zone_pfns() ||
+ highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]))
+ highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]);
+#endif
+
+ high_memory = phys_to_virt(highmem - 1) + 1;
+}
+
/**
* free_area_init - Initialise all pg_data_t and zone data
* @max_zone_pfn: an array of max PFNs for each zone
/* disable hash distribution for systems with a single node */
fixup_hashdist();
+
+ set_high_memory();
}
/**
#include <asm/mmu_context.h>
#include "internal.h"
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;