// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
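/*
 * Usage sketch (illustrative; DEV_PHYS_BASE and CTRL_REG are
 * hypothetical): a driver maps its register window once, touches it
 * only through the MMIO accessors, and unmaps on teardown.
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, SZ_4K);
 *
 *	if (regs) {
 *		writel(0x1, regs + CTRL_REG);
 *		(void)readl(regs + CTRL_REG);
 *		iounmap(regs);
 *	}
 */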
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}
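/*
 * Sketch of the insertion above (illustrative addresses): inserting
 * before the first entry with a higher vaddr keeps static_vmlist
 * sorted, which is what lets find_static_vm_vaddr() break out early.
 *
 *	before: [0xf0000000] -> [0xf0200000] -> [0xf0800000]
 *	add 0xf0400000:
 *	after:  [0xf0000000] -> [0xf0200000] -> [0xf0400000] -> [0xf0800000]
 */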

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
	int seq;

	do {
		seq = atomic_read(&init_mm.context.vmalloc_seq);
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		/*
		 * Use a store-release so that other CPUs that observe the
		 * counter's new value are guaranteed to see the results of the
		 * memcpy as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
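/*
 * Why the loop above retries: if init_mm's vmalloc_seq advances while
 * the memcpy is in flight, the copy may mix stale and fresh PGD
 * entries, so the sequence is re-read and the copy repeated until a
 * stable value is observed.
 */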

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			atomic_inc_return_release(&init_mm.context.vmalloc_seq);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
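/*
 * Worked example for remap_area_sections() (illustrative numbers):
 * mapping 2MB of phys 0x50000000 at virt 0xe0000000 writes two 1MB
 * section descriptors per iteration:
 *
 *	pmd[0] = 0x50000000 | prot_sect    (0xe0000000 - 0xe00fffff)
 *	pmd[1] = 0x50100000 | prot_sect    (0xe0100000 - 0xe01fffff)
 *
 * On classic (non-LPAE) ARM, Linux folds two 1MB hardware L1 entries
 * into each 2MB pmd, hence the increments by 2.
 */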

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
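/*
 * Sketch of the supersection encoding above (illustrative numbers): a
 * 16MB supersection keeps PA[31:24] in the usual descriptor field and
 * packs PA[35:32] of a >4GB physical address into descriptor bits
 * [23:20]. For pfn 0x140000 (phys 0x1_4000_0000 with 4K pages):
 *
 *	__pfn_to_phys(pfn) (low 32 bits)  -> 0x40000000
 *	(pfn >> (32 - PAGE_SHIFT)) & 0xf  -> 0x1  = PA[35:32]
 *	... << 20                         -> descriptor bit 20 set
 *
 * The descriptor is then replicated into all 16 L1 entries (8 loop
 * iterations x 2 entries) covering the 16MB region, as the
 * architecture requires.
 */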
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
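/*
 * Summary of the fallbacks above: __arm_ioremap_pfn_caller() prefers
 * the largest mapping unit the request allows -- an existing static
 * mapping first, then (on !SMP, !LPAE) 16MB supersections or 1MB
 * sections, and finally ordinary 4K pages via ioremap_page_range().
 */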

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
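/*
 * Worked example for the offset/pfn split above (illustrative values,
 * 4K pages): a request for phys_addr 0x10000404 and size 0x100 gives
 *
 *	offset = 0x10000404 & ~PAGE_MASK  -> 0x404
 *	pfn    = 0x10000404 >> PAGE_SHIFT -> 0x10000
 *
 * so the page at 0x10000000 is mapped and the caller's pointer lands
 * 0x404 bytes into it, per the NOTE below.
 */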

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
362 | ||
9b97173e | 363 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, |
4fe7ef3a RH |
364 | unsigned int, void *) = |
365 | __arm_ioremap_caller; | |
366 | ||
20a1080d RK |
367 | void __iomem *ioremap(resource_size_t res_cookie, size_t size) |
368 | { | |
369 | return arch_ioremap_caller(res_cookie, size, MT_DEVICE, | |
370 | __builtin_return_address(0)); | |
371 | } | |
372 | EXPORT_SYMBOL(ioremap); | |
373 | ||
374 | void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) | |
375 | { | |
376 | return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, | |
377 | __builtin_return_address(0)); | |
378 | } | |
379 | EXPORT_SYMBOL(ioremap_cache); | |
380 | ||
381 | void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) | |
31aa8fd6 | 382 | { |
20a1080d RK |
383 | return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC, |
384 | __builtin_return_address(0)); | |
1da177e4 | 385 | } |
20a1080d | 386 | EXPORT_SYMBOL(ioremap_wc); |
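/*
 * Note on the three variants above: MT_DEVICE yields a device-type
 * mapping suitable for registers, MT_DEVICE_CACHED a cacheable mapping
 * for RAM-like targets, and MT_DEVICE_WC a write-combining mapping,
 * e.g. for framebuffers where batching writes matters more than strict
 * ordering.
 */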

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}
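/*
 * Usage sketch for __arm_iomem_set_ro() (illustrative; ROM_PHYS is
 * hypothetical): a region that is only ever read can drop write
 * permission once it has been mapped:
 *
 *	void __iomem *rom = ioremap(ROM_PHYS, SZ_64K);
 *
 *	if (rom)
 *		__arm_iomem_set_ro(rom, SZ_64K);
 */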

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(iounmap);

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
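/*
 * Usage sketch for pci_remap_iospace() (illustrative values): a PCI
 * host bridge driver maps its I/O window into the fixed PCI_IOBASE
 * region; res describes the logical I/O range, phys_addr the CPU
 * address the bridge decodes it at:
 *
 *	struct resource io_res = {
 *		.start	= 0,
 *		.end	= SZ_64K - 1,
 *		.flags	= IORESOURCE_IO,
 *	};
 *
 *	pci_remap_iospace(&io_res, 0x5f800000);
 */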

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(pfn);
}