// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

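/*
 * Lazily synchronize the i-cache with the d-cache: the flush is deferred
 * until a page is first mapped executable, and PG_arch_1 records that the
 * flush has been done so it is not repeated for the same page.
 */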
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}

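/*
 * Compute the bottom of the register backing store (RBS), the upward-growing
 * area into which the ia64 Register Stack Engine spills stacked registers.
 * It is placed the stack's hard limit (capped at MAX_USER_STACK_SIZE) below
 * the start of the conventional, downward-growing stack.
 */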
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		mmap_write_lock(current->mm);
		if (insert_vm_struct(current->mm, vma)) {
			mmap_write_unlock(current->mm);
			vm_area_free(vma);
			return;
		}
		mmap_write_unlock(current->mm);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			mmap_write_lock(current->mm);
			if (insert_vm_struct(current->mm, vma)) {
				mmap_write_unlock(current->mm);
				vm_area_free(vma);
				return;
			}
			mmap_write_unlock(current->mm);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		p4d = p4d_alloc(&init_mm, pgd, address);
		if (!p4d)
			goto out;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

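/*
 * A static VMA describing the gate page region is handed to the core VM via
 * get_gate_vma()/in_gate_area() below, so the region shows up properly in
 * places such as /proc/<pid>/maps and in ptrace() accesses.
 */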
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

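/*
 * Per-CPU MMU setup: program the PTA register so the VHPT walker uses the
 * short-format virtually mapped linear page table (VMLPT) placed at the top
 * of each region, then initialize the TLB.
 */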
void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
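
	/*
	 * Worked example (editor's note): with 16KB pages (PAGE_SHIFT = 14)
	 * and 8-byte PTEs (pte_bits = 3), mapped_space_bits = 3*(14-3) + 14 =
	 * 47, i.e. the three-level page table can map 2^47 bytes per region.
	 * With impl_va_bits = 51, vmlpt_bits = 51 - 14 + 3 = 40, so 2^40
	 * bytes at the top of each region are set aside for the VMLPT below.
	 */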
339 | ||
340 | impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61))); | |
341 | ||
342 | if (impl_va_bits < 51 || impl_va_bits > 61) | |
343 | panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); | |
6cf07a8c PC |
344 | /* |
345 | * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, | |
346 | * which must fit into "vmlpt_bits - pte_bits" slots. Second half of | |
347 | * the test makes sure that our mapped space doesn't overlap the | |
348 | * unimplemented hole in the middle of the region. | |
349 | */ | |
350 | if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || | |
351 | (mapped_space_bits > impl_va_bits - 1)) | |
352 | panic("Cannot build a big enough virtual-linear page table" | |
353 | " to cover mapped address space.\n" | |
354 | " Try using a smaller page size.\n"); | |
355 | ||
1da177e4 LT |
356 | |
357 | /* place the VMLPT at the end of each page-table mapped region: */ | |
358 | pta = POW2(61) - POW2(vmlpt_bits); | |
359 | ||
1da177e4 LT |
360 | /* |
361 | * Set the (virtually mapped linear) page table address. Bit | |
362 | * 8 selects between the short and long format, bits 2-7 the | |
363 | * size of the table, and bit 0 whether the VHPT walker is | |
364 | * enabled. | |
365 | */ | |
366 | ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT); | |
367 | ||
368 | ia64_tlb_init(); | |
369 | ||
370 | #ifdef CONFIG_HUGETLB_PAGE | |
371 | ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2); | |
372 | ia64_srlz_d(); | |
373 | #endif | |
374 | } | |
375 | ||
#ifdef CONFIG_VIRTUAL_MEM_MAP
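/*
 * Walk the kernel page table from the vmem_map entry for pfn offset "i" on
 * "node" and return the offset of the next pfn whose struct page is actually
 * backed by a mapped page, skipping over holes in the virtual memmap.
 */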
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		p4d = p4d_offset(pgd, end_address);
		if (p4d_none(*p4d)) {
			end_address += P4D_SIZE;
			continue;
		}

		pud = pud_offset(p4d, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

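/*
 * Called via efi_memmap_walk(): populate the kernel page tables (allocating
 * any missing levels from memblock) and back with fresh pages the slice of
 * vmem_map that covers the physical range [start, end).
 */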
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd)) {
			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!p4d)
				goto err_alloc;
			pgd_populate(&init_mm, pgd, p4d);
		}
		p4d = p4d_offset(pgd, address);

		if (p4d_none(*p4d)) {
			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pud)
				goto err_alloc;
			p4d_populate(&init_mm, p4d, pud);
		}
		pud = pud_offset(p4d, address);

		if (pud_none(*pud)) {
			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pmd)
				goto err_alloc;
			pud_populate(&init_mm, pud, pmd);
		}
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd)) {
			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pte)
				goto err_alloc;
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte)) {
			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
							 node);
			if (!page)
				goto err_alloc;
			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
					     PAGE_KERNEL));
		}
	}
	return 0;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, node);
	return -ENOMEM;
}

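/*
 * virtual_memmap_init() initializes the struct pages of vmem_map for one
 * EFI memory range, clipped to the [start, end) window passed down from
 * memmap_init_zone() in the callback data below.
 */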
struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_range((unsigned long)(map_end - map_start),
				  args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
				  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	return 0;
}

void __meminit
memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		 unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
				  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

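/*
 * With a virtual memmap there may be no backing page at all behind a given
 * vmem_map entry.  Probe the first and last byte of the entry with
 * __get_user() (which faults safely), since the struct page may straddle a
 * page boundary.
 */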
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return	 (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

#endif /* CONFIG_VIRTUAL_MEM_MAP */

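/*
 * efi_memmap_walk() callback that registers a usable memory range with
 * memblock, first clipping out any overlap with the crash kernel
 * reservation when CONFIG_KEXEC is enabled.
 */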
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

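/*
 * Track the lowest and highest page frame numbers seen across memory
 * ranges, updating the global min_low_pfn/max_low_pfn bounds.
 */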
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

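/*
 * Note: the BUG_ON()s at the top of mem_init() assert a basic ia64
 * assumption, namely that every page-table level (pgd, pmd, pte)
 * occupies exactly one page.
 */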
void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

	/*
	 * This needs to be called _after_ the command line has been parsed but
	 * _before_ any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */
#ifdef CONFIG_INTEL_IOMMU
	detect_intel_iommu();
	if (!iommu_detected)
#endif
#ifdef CONFIG_SWIOTLB
		swiotlb_init(1);
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
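/*
 * Memory hotplug entry points.  Only PAGE_KERNEL protections are supported
 * here; anything else is rejected with -EINVAL.
 */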
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif