// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
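
/*
 * Allocate the empty zero page(s) and derive zero_page_mask, which picks
 * the right-coloured zero page for a given address. With a single
 * order-0 page (as here), the mask works out to 0.
 */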
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
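
/*
 * Copy a user page via short-lived kernel mappings; the trailing barrier
 * ensures the copied data is visible to other CPUs before the page is used.
 */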
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	vto = kmap_atomic(to);
	vfrom = kmap_atomic(from);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
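
/* A pfn is RAM iff memblock knows it as memory and it is not reserved. */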
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}
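
/*
 * Flat (non-NUMA) zone setup: DMA/DMA32 end at their architectural PFN
 * limits, and ZONE_NORMAL covers everything up to max_low_pfn.
 */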
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}
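
/* Release all free memblock memory to the buddy allocator. */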
void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}
#endif /* !CONFIG_NUMA */
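
/* Free (and poison) the memory used only by __init code and data. */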
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
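
/*
 * Memory hotplug: create and tear down the struct page metadata for
 * hot-(un)plugged physical ranges.
 */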
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
			__func__, ret);
	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
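
/*
 * Return the kernel pte for @addr in the fixmap region, allocating any
 * missing intermediate page tables from low memblock memory and
 * initializing new mid-level tables so their entries point at the
 * invalid_*_table placeholders.
 */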
static pte_t *fixmap_pte(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);

	if (pgd_none(*pgd)) {
		pud_t *new __maybe_unused;

		new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new __maybe_unused;

		new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new __maybe_unused;

		new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return pte_offset_kernel(pmd, addr);
}
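
/*
 * Map (or, with empty flags, unmap) the fixmap slot @idx at physical
 * address @phys. A slot that is already mapped is left untouched.
 */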
void __init __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);
	if (!pte_none(*ptep)) {
		pte_ERROR(*ptep);
		return;
	}
	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);