/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

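/*
 * Allocate a zeroed page from memblock for use as an early page table.
 * Returns the physical address of the page; callers install it via the
 * __p*d_populate() helpers.
 */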
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Global to Non-Global is safe */
	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
		return true;

	return ((old ^ new) & ~mask) == 0;
}

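/*
 * Install PTEs covering [addr, end) at the given physical address. The PTE
 * table is accessed through the FIX_PTE fixmap slot, since its linear
 * mapping may not be available yet.
 */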
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

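/*
 * Populate the PTE level for [addr, end), allocating a PTE table first if
 * the PMD entry is empty. Suitably aligned sub-ranges are mapped with the
 * contiguous bit unless NO_CONT_MAPPINGS is set.
 */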
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

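/*
 * Install PMD entries covering [addr, end), using section (block) mappings
 * where the range and physical address are section aligned and
 * NO_BLOCK_MAPPINGS is not set, and PTE-level mappings otherwise.
 */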
static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

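/*
 * Populate the PMD level for [addr, end), allocating a PMD table first if
 * the PUD entry is empty. Suitably aligned sub-ranges are mapped with the
 * contiguous bit unless NO_CONT_MAPPINGS is set.
 */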
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

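/*
 * Level 1 (1GB) blocks can only be used with a 4K granule (PAGE_SHIFT == 12),
 * and only when the range and physical address are 1GB (PUD_SIZE) aligned.
 */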
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

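/*
 * Populate the PUD level for [addr, end), allocating a PUD table first if
 * the PGD entry is empty, and using 1GB block mappings where possible.
 */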
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	pgd_t pgd = READ_ONCE(*pgdp);

	if (pgd_none(pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
		pgd = READ_ONCE(*pgdp);
	}
	BUG_ON(pgd_bad(pgd));

	pudp = pud_set_fixmap_offset(pgdp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

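/*
 * Create a mapping of [virt, virt + size) to phys in the given page table,
 * walking down from the PGD and allocating intermediate tables with
 * pgtable_alloc() as needed. The flags control the use of block and
 * contiguous mappings.
 */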
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 int flags)
{
	unsigned long addr, length, end, next;
	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

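/*
 * Page table allocator for mappings created once the page allocator is up:
 * unlike early_pgtable_alloc(), the page is obtained from the buddy
 * allocator and passed through pgtable_page_ctor() so the core mm code can
 * treat it like any other page table page.
 */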
static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

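/*
 * Create a mapping in a non-init mm's page table (e.g. the EFI runtime
 * services mm). Intermediate tables come from pgd_pgtable_alloc(), so this
 * cannot be used before the page allocator is available.
 */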
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

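/*
 * Change the protections of an existing kernel mapping (e.g. when marking
 * sections read-only) and flush the TLB for the updated range.
 */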
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

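/*
 * Map all memblock memory regions into the linear map. The linear alias of
 * the kernel image is mapped without the contiguous bit so its permissions
 * can be updated later, and the crash kernel region is mapped with pages
 * only so it can be shrunk at run time.
 */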
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

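/*
 * Map a segment of the kernel image (e.g. .text or .rodata) with the given
 * protections and register a vm_struct for it via vm_area_add_early() so the
 * region is reserved in the vmalloc area.
 */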
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	return strtobool(arg, &rodata_enabled);
}
early_param("rodata", parse_rodata);

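/*
 * When the kernel is unmapped at EL0 (KPTI), the exception entry trampoline
 * text must be visible both in the trampoline page table (at TRAMP_VALIAS)
 * and in the kernel page table (via the fixmap); set that up here.
 */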
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud_populate(&init_mm,
			     pud_set_fixmap_offset(pgdp, FIXADDR_START),
			     lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);

	map_kernel(pgdp);
	map_mem(pgdp);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
		      - PAGE_SIZE);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(pgdp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

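/*
 * The fixmap is backed by the statically allocated bm_pud/bm_pmd/bm_pte
 * tables; the helpers below return pointers to the fixmap's table entries
 * at each level.
 */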
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pgd_t pgd = READ_ONCE(*pgdp);

	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

	return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp, pgd;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	pgd = READ_ONCE(*pgdp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(pgdp, addr);
	} else {
		if (pgd_none(pgd))
			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

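/*
 * Map the device tree blob through the FIX_FDT fixmap slot so that it can be
 * inspected before the linear map has been set up. Returns the virtual
 * address of the FDT, or NULL if the blob is missing, misaligned, invalid or
 * too large.
 */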
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least 8
	 * bytes so that we can always access the magic and size fields of the
	 * FDT header after mapping the first chunk; double check here that
	 * this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

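/*
 * Install or remove section (block) mappings at the PUD and PMD levels on
 * behalf of the generic huge ioremap code.
 */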
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}