arm64: add support for kernel ASLR
arch/arm64/mm/mmu.c (linux-2.6-block.git)
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

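/*
 * Offset between the kernel image's virtual addresses and the physical
 * addresses it was loaded at. With KASLR this is only known at boot time
 * (it is established during early boot, in head.S), hence a variable
 * rather than a compile-time constant.
 */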
u64 kimage_voffset __read_mostly;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

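/*
 * Choose the pgprot for user mappings of physical memory (e.g. /dev/mem):
 * addresses outside RAM are mapped non-cacheable (Device), O_SYNC mappings
 * of RAM are mapped write-combine, and anything else keeps the caller's
 * attributes.
 */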
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

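/*
 * Allocate a zeroed page-table page from memblock. This runs before the
 * linear mapping is available, so the fresh page is cleared through a
 * transient fixmap mapping rather than through its linear address.
 */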
static phys_addr_t __init early_pgtable_alloc(void)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);

        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
         * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
         * any level of table.
         */
        ptr = pte_set_fixmap(phys);

        memset(ptr, 0, PAGE_SIZE);

        /*
         * Implicit barriers also ensure the zeroed page is visible to the
         * page table walker.
         */
        pte_clear_fixmap();

        return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

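/*
 * Create PTE-level mappings for [addr, end). If the PMD entry is empty,
 * allocate a new PTE table; if it holds a section mapping, split the
 * section into pages first so the new protections can be applied at page
 * granularity.
 */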
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                           unsigned long end, unsigned long pfn,
                           pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                phys_addr_t pte_phys;

                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc();
                pte = pte_set_fixmap(pte_phys);
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                flush_tlb_all();
                pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);

        pte_clear_fixmap();
}

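/*
 * Split a 1GB PUD block into PMD-level blocks, carrying the original
 * output address and attributes over into each new entry.
 */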
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
        /*
         * If debug_pagealloc is enabled we must map the linear map
         * using pages. However, other mappings created by
         * create_mapping_noalloc must use sections in some cases. Allow
         * sections to be used in those cases, where no pgtable_alloc
         * function is provided.
         */
        return !pgtable_alloc || !debug_pagealloc_enabled();
}
#else
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
        return true;
}
#endif

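/*
 * Create PMD-level mappings for [addr, end), using section mappings where
 * the virtual range and physical address are section-aligned (and block
 * mappings are permitted), and falling back to PTE tables otherwise.
 */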
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                phys_addr_t pmd_phys;

                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc();
                pmd = pmd_set_fixmap(pmd_phys);
                if (pud_sect(*pud)) {
                        /*
                         * need to have the 1G of mappings continue to be
                         * present
                         */
                        split_pud(pud, pmd);
                }
                __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                flush_tlb_all();
                pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
                    block_mappings_allowed(pgtable_alloc)) {
                        pmd_t old_pmd = *pmd;

                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = pmd_page_paddr(old_pmd);

                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);

        pmd_clear_fixmap();
}

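/*
 * 1GB blocks at level 1 are only available with a 4KB granule
 * (PAGE_SHIFT == 12), and require 1GB alignment of both the virtual
 * range and the physical address.
 */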
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                                unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

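/*
 * Create PUD-level mappings for [addr, end), putting down a 1GB block
 * wherever use_1G_block() allows it and descending to PMD level otherwise.
 */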
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                phys_addr_t pud_phys;

                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc();
                __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys) &&
                    block_mappings_allowed(pgtable_alloc)) {
                        pud_t old_pud = *pud;

                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = pud_page_paddr(old_pud);

                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(pud, addr, next, phys, prot,
                                       pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);

        pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt', 'size' and 'prot'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                     phys_addr_t size, pgprot_t prot,
                     phys_addr_t (*pgtable_alloc)(void))
{
        unsigned long addr, length, end, next;

        /*
         * If the virtual and physical address don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;

        phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

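/*
 * Allocate a page-table page once the normal page allocator is up; the
 * page comes back zeroed thanks to __GFP_ZERO in PGALLOC_GFP.
 */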
static phys_addr_t late_pgtable_alloc(void)
{
        void *ptr = (void *)__get_free_page(PGALLOC_GFP);

        BUG_ON(!ptr);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return __pa(ptr);
}

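/*
 * Install a mapping in an arbitrary (not necessarily active) pgd; all of
 * the create_mapping_* variants below funnel through this helper.
 */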
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
                                 phys_addr_t (*alloc)(void))
{
        init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                          phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL);
}

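/*
 * Map [phys, phys + size) at 'virt' in the page tables of the given mm.
 * This is the entry point for callers with their own pgd (e.g. the EFI
 * runtime services mapping code) rather than init_mm.
 */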
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
                             late_pgtable_alloc);
}

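/*
 * Update or create kernel mappings after paging_init(), allocating any
 * intermediate tables with late_pgtable_alloc().
 */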
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
                             late_pgtable_alloc);
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
        unsigned long kernel_start = __pa(_stext);
        unsigned long kernel_end = __pa(_etext);

        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         */

        /* No overlap with the kernel text */
        if (end < kernel_start || start >= kernel_end) {
                __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                     end - start, PAGE_KERNEL,
                                     early_pgtable_alloc);
                return;
        }

        /*
         * This block overlaps the kernel text mapping.
         * Map the portion(s) which don't overlap.
         */
        if (start < kernel_start)
                __create_pgd_mapping(pgd, start,
                                     __phys_to_virt(start),
                                     kernel_start - start, PAGE_KERNEL,
                                     early_pgtable_alloc);
        if (kernel_end < end)
                __create_pgd_mapping(pgd, kernel_end,
                                     __phys_to_virt(kernel_end),
                                     end - kernel_end, PAGE_KERNEL,
                                     early_pgtable_alloc);

        /*
         * Map the linear alias of the [_stext, _etext) interval as
         * read-only/non-executable. This makes the contents of the
         * region accessible to subsystems such as hibernate, but
         * protects it from inadvertent modification or execution.
         */
        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
                             kernel_end - kernel_start, PAGE_KERNEL_RO,
                             early_pgtable_alloc);
}

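/*
 * Map all usable RAM reported by memblock into the linear mapping,
 * skipping regions marked NOMAP.
 */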
static void __init map_mem(pgd_t *pgd)
{
        struct memblock_region *reg;

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;

                __map_memblock(pgd, start, end);
        }
}

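/*
 * With CONFIG_DEBUG_RODATA, remap the kernel text ([_stext, _etext))
 * read-only (but still executable) once the kernel no longer needs to
 * write to it.
 */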
void mark_rodata_ro(void)
{
        if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
                return;

        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                            (unsigned long)_etext - (unsigned long)_stext,
                            PAGE_KERNEL_ROX);
}

void fixup_init(void)
{
        /*
         * Unmap the __init region but leave the VM area in place. This
         * prevents the region from being reused for kernel modules, which
         * is not supported by kallsyms.
         */
        unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

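/*
 * Map a segment of the kernel image and register it as a vmalloc-area VM
 * region via vm_area_add_early(), so the virtual range cannot be handed
 * out again by the vmalloc allocator.
 */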
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
                                    pgprot_t prot, struct vm_struct *vma)
{
        phys_addr_t pa_start = __pa(va_start);
        unsigned long size = va_end - va_start;

        BUG_ON(!PAGE_ALIGNED(pa_start));
        BUG_ON(!PAGE_ALIGNED(size));

        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc);

        vma->addr = va_start;
        vma->phys_addr = pa_start;
        vma->size = size;
        vma->flags = VM_MAP;
        vma->caller = __builtin_return_address(0);

        vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
        static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;

        map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
        map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
                         &vmlinux_init);
        map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

        if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
                set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
                        *pgd_offset_k(FIXADDR_START));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
                 * with 16k/4 levels, so we can simply reuse the pud level
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
                        __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
                pud_clear_fixmap();
        } else {
                BUG();
        }

        kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        phys_addr_t pgd_phys = early_pgtable_alloc();
        pgd_t *pgd = pgd_set_fixmap(pgd_phys);

        map_kernel(pgd);
        map_mem(pgd);

        /*
         * We want to reuse the original swapper_pg_dir so we don't have to
         * communicate the new address to non-coherent secondaries in
         * secondary_entry, and so cpu_switch_mm can generate the address with
         * adrp+add rather than a load from some global variable.
         *
         * To do this we need to go via a temporary pgd.
         */
        cpu_replace_ttbr1(__va(pgd_phys));
        memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
        cpu_replace_ttbr1(swapper_pg_dir);

        pgd_clear_fixmap();
        memblock_free(pgd_phys, PAGE_SIZE);

        /*
         * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
         * allocated with it.
         */
        memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
                      SWAPPER_DIR_SIZE - PAGE_SIZE);

        bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
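/*
 * Populate the vmemmap with PMD-sized section mappings when the swapper
 * itself uses section maps, and with base pages otherwise.
 */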
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

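/*
 * Walkers for the fixmap tables. They return pointers to the statically
 * allocated bm_* tables through the kernel-image mapping (the *_kimg
 * accessors), so they work before the linear mapping is available.
 */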
static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

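/*
 * Wire up the fixmap page tables early, before paging_init(), so that
 * early users such as early_ioremap and the FDT mapping below can use
 * fixmap slots.
 */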
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pud = pud_offset_kimg(pgd, addr);
        } else {
                pgd_populate(&init_mm, pgd, bm_pud);
                pud = fixmap_pud(addr);
        }
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = fixmap_pmd(addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

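/*
 * Install or tear down a single fixmap entry: a non-zero pgprot maps the
 * slot to 'phys', a zero pgprot clears the PTE and flushes the TLB.
 */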
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the size field of the
         * FDT header after mapping the first chunk, double check here if that
         * is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                               dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

        if (fdt_check_header(dt_virt) != 0)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                                       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

        return dt_virt;
}

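/*
 * Map the FDT read-only via the fixmap and reserve its footprint in
 * memblock so the early allocator will not hand it out.
 */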
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        void *dt_virt;
        int size;

        dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
        if (!dt_virt)
                return NULL;

        memblock_reserve(dt_phys, size);
        return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
        /* only 4k granule supports level 1 block mappings */
        return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
        return 1;
}

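/*
 * Install/remove section (block) mappings at PUD/PMD level on behalf of
 * the generic huge-ioremap code; 'phys' must be aligned to the block size.
 */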
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
        BUG_ON(phys & ~PUD_MASK);
        set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
        return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
        BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
        return 1;
}

int pud_clear_huge(pud_t *pud)
{
        if (!pud_sect(*pud))
                return 0;
        pud_clear(pud);
        return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
        if (!pmd_sect(*pmd))
                return 0;
        pmd_clear(pmd);
        return 1;
}