/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
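
/*
 * Both flags are handed down to __create_pgd_mapping(): NO_BLOCK_MAPPINGS
 * forces page-granular (PTE level) mappings instead of PMD/PUD blocks, and
 * NO_CONT_MAPPINGS keeps the PTE_CONT hint from being set, so that the
 * resulting mappings can later be changed or split safely.
 */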

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 vabits_user __ro_after_init;
EXPORT_SYMBOL(vabits_user);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}
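
/*
 * The mapping helpers below rely on pgattr_change_is_safe() to enforce the
 * break-before-make rule: once an entry is live, only the permission bits in
 * 'mask' above may be rewritten in place (and a non-global entry must never
 * silently become global). Any other change to a live entry trips the
 * BUG_ON()s in the init_p*()/alloc_init_*() functions.
 */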

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}
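
/*
 * The PTE_CONT hint set above (and again at the PMD level in
 * alloc_init_cont_pmd() below) marks a naturally aligned run of entries that
 * map one physically contiguous block, allowing the TLB to cache the whole
 * run as a single entry. With 4K pages that run is typically 16 PTEs (64K);
 * the exact span is defined by CONT_PTE_MASK/CONT_PMD_MASK for the configured
 * granule. The hint is only applied when the virtual address, physical
 * address and length are all suitably aligned and the caller did not pass
 * NO_CONT_MAPPINGS.
 */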

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	pgd_t pgd = READ_ONCE(*pgdp);

	if (pgd_none(pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
		pgd = READ_ONCE(*pgdp);
	}
	BUG_ON(pgd_bad(pgd));

	pudp = pud_set_fixmap_offset(pgdp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}
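
/*
 * __create_pgd_mapping() below is the common entry point for building kernel
 * mappings: it walks pgd -> pud -> pmd -> pte through alloc_init_pud(),
 * alloc_init_cont_pmd()/init_pmd() and alloc_init_cont_pte()/init_pte(),
 * allocating intermediate tables via the supplied pgtable_alloc() callback
 * and using block or contiguous mappings whenever alignment and the flags
 * allow it.
 */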
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, length, end, next;
	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}
535 | void mark_rodata_ro(void) |
536 | { | |
2f39b5f9 | 537 | unsigned long section_size; |
f9040773 | 538 | |
2f39b5f9 | 539 | /* |
9fdc14c5 AB |
540 | * mark .rodata as read only. Use __init_begin rather than __end_rodata |
541 | * to cover NOTES and EXCEPTION_TABLE. | |
2f39b5f9 | 542 | */ |
9fdc14c5 | 543 | section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; |
aa8c09be | 544 | update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, |
2f39b5f9 | 545 | section_size, PAGE_KERNEL_RO); |
e98216b5 | 546 | |
1404d6f1 | 547 | debug_checkwx(); |
da141706 | 548 | } |
da141706 | 549 | |
20a004e7 | 550 | static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, |
d27cfa1f | 551 | pgprot_t prot, struct vm_struct *vma, |
92bbd16e | 552 | int flags, unsigned long vm_flags) |
068a17a5 | 553 | { |
2077be67 | 554 | phys_addr_t pa_start = __pa_symbol(va_start); |
068a17a5 MR |
555 | unsigned long size = va_end - va_start; |
556 | ||
557 | BUG_ON(!PAGE_ALIGNED(pa_start)); | |
558 | BUG_ON(!PAGE_ALIGNED(size)); | |
559 | ||
20a004e7 | 560 | __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot, |
d27cfa1f | 561 | early_pgtable_alloc, flags); |
f9040773 | 562 | |
92bbd16e WD |
563 | if (!(vm_flags & VM_NO_GUARD)) |
564 | size += PAGE_SIZE; | |
565 | ||
f9040773 AB |
566 | vma->addr = va_start; |
567 | vma->phys_addr = pa_start; | |
568 | vma->size = size; | |
92bbd16e | 569 | vma->flags = VM_MAP | vm_flags; |
f9040773 AB |
570 | vma->caller = __builtin_return_address(0); |
571 | ||
572 | vm_area_add_early(vma); | |
068a17a5 MR |
573 | } |
574 | ||
28b066da AB |
575 | static int __init parse_rodata(char *arg) |
576 | { | |
c55191e9 AB |
577 | int ret = strtobool(arg, &rodata_enabled); |
578 | if (!ret) { | |
579 | rodata_full = false; | |
580 | return 0; | |
581 | } | |
582 | ||
583 | /* permit 'full' in addition to boolean options */ | |
584 | if (strcmp(arg, "full")) | |
585 | return -EINVAL; | |
586 | ||
587 | rodata_enabled = true; | |
588 | rodata_full = true; | |
589 | return 0; | |
28b066da AB |
590 | } |
591 | early_param("rodata", parse_rodata); | |
592 | ||
51a0048b WD |
593 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
594 | static int __init map_entry_trampoline(void) | |
595 | { | |
51a0048b WD |
596 | pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; |
597 | phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); | |
598 | ||
599 | /* The trampoline is always mapped and can therefore be global */ | |
600 | pgprot_val(prot) &= ~PTE_NG; | |
601 | ||
602 | /* Map only the text into the trampoline page table */ | |
603 | memset(tramp_pg_dir, 0, PGD_SIZE); | |
604 | __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, | |
475ba3fc | 605 | prot, __pgd_pgtable_alloc, 0); |
51a0048b | 606 | |
6c27c408 | 607 | /* Map both the text and data into the kernel page table */ |
51a0048b | 608 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); |
6c27c408 WD |
609 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { |
610 | extern char __entry_tramp_data_start[]; | |
611 | ||
612 | __set_fixmap(FIX_ENTRY_TRAMP_DATA, | |
613 | __pa_symbol(__entry_tramp_data_start), | |
614 | PAGE_KERNEL_RO); | |
615 | } | |
616 | ||
51a0048b WD |
617 | return 0; |
618 | } | |
619 | core_initcall(map_entry_trampoline); | |
620 | #endif | |
621 | ||
068a17a5 MR |
622 | /* |
623 | * Create fine-grained mappings for the kernel. | |
624 | */ | |
20a004e7 | 625 | static void __init map_kernel(pgd_t *pgdp) |
068a17a5 | 626 | { |
2ebe088b AB |
627 | static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext, |
628 | vmlinux_initdata, vmlinux_data; | |
068a17a5 | 629 | |
28b066da AB |
630 | /* |
631 | * External debuggers may need to write directly to the text | |
632 | * mapping to install SW breakpoints. Allow this (only) when | |
633 | * explicitly requested with rodata=off. | |
634 | */ | |
635 | pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; | |
636 | ||
d27cfa1f AB |
637 | /* |
638 | * Only rodata will be remapped with different permissions later on, | |
639 | * all other segments are allowed to use contiguous mappings. | |
640 | */ | |
20a004e7 | 641 | map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0, |
92bbd16e | 642 | VM_NO_GUARD); |
20a004e7 | 643 | map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL, |
92bbd16e | 644 | &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); |
20a004e7 | 645 | map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot, |
92bbd16e | 646 | &vmlinux_inittext, 0, VM_NO_GUARD); |
20a004e7 | 647 | map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL, |
92bbd16e | 648 | &vmlinux_initdata, 0, VM_NO_GUARD); |
20a004e7 | 649 | map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); |
068a17a5 | 650 | |
20a004e7 | 651 | if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) { |
f9040773 AB |
652 | /* |
653 | * The fixmap falls in a separate pgd to the kernel, and doesn't | |
654 | * live in the carveout for the swapper_pg_dir. We can simply | |
655 | * re-use the existing dir for the fixmap. | |
656 | */ | |
20a004e7 WD |
657 | set_pgd(pgd_offset_raw(pgdp, FIXADDR_START), |
658 | READ_ONCE(*pgd_offset_k(FIXADDR_START))); | |
f9040773 AB |
659 | } else if (CONFIG_PGTABLE_LEVELS > 3) { |
660 | /* | |
661 | * The fixmap shares its top level pgd entry with the kernel | |
662 | * mapping. This can really only occur when we are running | |
663 | * with 16k/4 levels, so we can simply reuse the pud level | |
664 | * entry instead. | |
665 | */ | |
666 | BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); | |
20a004e7 WD |
667 | pud_populate(&init_mm, |
668 | pud_set_fixmap_offset(pgdp, FIXADDR_START), | |
19338304 | 669 | lm_alias(bm_pmd)); |
f9040773 AB |
670 | pud_clear_fixmap(); |
671 | } else { | |
672 | BUG(); | |
673 | } | |
068a17a5 | 674 | |
20a004e7 | 675 | kasan_copy_shadow(pgdp); |
068a17a5 MR |
676 | } |
677 | ||
c1cc1552 CM |
678 | void __init paging_init(void) |
679 | { | |
2330b7ca | 680 | pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir)); |
068a17a5 | 681 | |
20a004e7 WD |
682 | map_kernel(pgdp); |
683 | map_mem(pgdp); | |
068a17a5 | 684 | |
068a17a5 | 685 | pgd_clear_fixmap(); |
068a17a5 | 686 | |
2077be67 | 687 | cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); |
2b5548b6 | 688 | init_mm.pgd = swapper_pg_dir; |
068a17a5 | 689 | |
2b5548b6 JY |
690 | memblock_free(__pa_symbol(init_pg_dir), |
691 | __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); | |
24cc61d8 AB |
692 | |
693 | memblock_allow_resize(); | |
c1cc1552 CM |
694 | } |
695 | ||
c1cc1552 CM |
696 | /* |
697 | * Check whether a kernel address is valid (derived from arch/x86/). | |
698 | */ | |
699 | int kern_addr_valid(unsigned long addr) | |
700 | { | |
20a004e7 WD |
701 | pgd_t *pgdp; |
702 | pud_t *pudp, pud; | |
703 | pmd_t *pmdp, pmd; | |
704 | pte_t *ptep, pte; | |
c1cc1552 CM |
705 | |
706 | if ((((long)addr) >> VA_BITS) != -1UL) | |
707 | return 0; | |
708 | ||
20a004e7 WD |
709 | pgdp = pgd_offset_k(addr); |
710 | if (pgd_none(READ_ONCE(*pgdp))) | |
c1cc1552 CM |
711 | return 0; |
712 | ||
20a004e7 WD |
713 | pudp = pud_offset(pgdp, addr); |
714 | pud = READ_ONCE(*pudp); | |
715 | if (pud_none(pud)) | |
c1cc1552 CM |
716 | return 0; |
717 | ||
20a004e7 WD |
718 | if (pud_sect(pud)) |
719 | return pfn_valid(pud_pfn(pud)); | |
206a2a73 | 720 | |
20a004e7 WD |
721 | pmdp = pmd_offset(pudp, addr); |
722 | pmd = READ_ONCE(*pmdp); | |
723 | if (pmd_none(pmd)) | |
c1cc1552 CM |
724 | return 0; |
725 | ||
20a004e7 WD |
726 | if (pmd_sect(pmd)) |
727 | return pfn_valid(pmd_pfn(pmd)); | |
da6e4cb6 | 728 | |
20a004e7 WD |
729 | ptep = pte_offset_kernel(pmdp, addr); |
730 | pte = READ_ONCE(*ptep); | |
731 | if (pte_none(pte)) | |
c1cc1552 CM |
732 | return 0; |
733 | ||
20a004e7 | 734 | return pfn_valid(pte_pfn(pte)); |
c1cc1552 CM |
735 | } |
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(pgdp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pgd_t pgd = READ_ONCE(*pgdp);

	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

	return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp, pgd;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	pgd = READ_ONCE(*pgdp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(pgdp, addr);
	} else {
		if (pgd_none(pgd))
			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}
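
/*
 * pud_set_huge()/pmd_set_huge() install section (block) mappings on behalf of
 * the generic huge ioremap/vmap code. Like the early mapping helpers above,
 * they refuse anything other than a permission change on an already-live
 * entry, as enforced by pgattr_change_is_safe().
 */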
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		    bool want_memblock)
{
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);

	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			   altmap, want_memblock);
}
#endif