// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/ioremap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
        unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        enum page_cache_mode pcm)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WT:
                err = _set_memory_wt(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
        unsigned long start_pfn, stop_pfn;
        unsigned long pfn;

        if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
                return 0;

        start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
        stop_pfn = (res->end + 1) >> PAGE_SHIFT;
        if (stop_pfn > start_pfn) {
                for_each_valid_pfn(pfn, start_pfn, stop_pfn)
                        if (!PageReserved(pfn_to_page(pfn)))
                                return IORES_MAP_SYSTEM_RAM;
        }

        return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * in that case the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return 0;

        switch (res->desc) {
        case IORES_DESC_NONE:
        case IORES_DESC_RESERVED:
                break;
        default:
                return IORES_MAP_ENCRYPTED;
        }

        return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

        if (x86_platform.hyper.is_private_mmio(addr)) {
                desc->flags |= IORES_MAP_ENCRYPTED;
                return;
        }

        if (!IS_ENABLED(CONFIG_EFI))
                return;

        if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
            (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
             efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
                desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
        struct ioremap_desc *desc = arg;

        if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
                desc->flags |= __ioremap_check_ram(res);

        if (!(desc->flags & IORES_MAP_ENCRYPTED))
                desc->flags |= __ioremap_check_encrypted(res);

        return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
                (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource described as something other than IORES_DESC_NONE (e.g.
 * IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
                                struct ioremap_desc *desc)
{
        u64 start, end;

        start = (u64)addr;
        end = start + size - 1;
        memset(desc, 0, sizeof(struct ioremap_desc));

        walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

        __ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using a smaller page toward 4KB
 * when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
                 enum page_cache_mode pcm, void *caller, bool encrypted)
{
        unsigned long offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct ioremap_desc io_desc;
        struct vm_struct *area;
        enum page_cache_mode new_pcm;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        __ioremap_check_mem(phys_addr, size, &io_desc);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Mask out any bits not part of the actual physical
         * address, like memory encryption bits.
         */
        phys_addr &= PHYSICAL_PAGE_MASK;

        retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
                                 pcm, &new_pcm);
        if (retval) {
                printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
                return NULL;
        }

        if (pcm != new_pcm) {
                if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                pcm, new_pcm);
                        goto err_free_memtype;
                }
                pcm = new_pcm;
        }

        /*
         * If the page being mapped is in memory and SEV is active, then
         * make sure the memory encryption attribute is enabled in the
         * resulting mapping.
         * In TDX guests, memory is marked private by default. If encryption
         * is not requested (via the 'encrypted' argument), explicitly set
         * the decrypted attribute on all ioremapped memory.
         */
        prot = PAGE_KERNEL_IO;
        if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
                prot = pgprot_encrypted(prot);
        else
                prot = pgprot_decrypted(prot);

        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC));
                break;
        case _PAGE_CACHE_MODE_UC_MINUS:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
                break;
        case _PAGE_CACHE_MODE_WC:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
        case _PAGE_CACHE_MODE_WT:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WT));
                break;
        case _PAGE_CACHE_MODE_WB:
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (memtype_kernel_map_sync(phys_addr, size, pcm))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
                pr_warn("caller %pS mapping multiple BARs\n", caller);

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        memtype_free(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS. Drivers that are certain they need or can already
         * be converted over to strong UC can use ioremap_uc().
         */
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

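/*
 * Illustrative usage sketch (not part of the original file): a typical
 * driver maps a device BAR with ioremap(), accesses it through the mmio
 * helpers, and tears the mapping down with iounmap(). The device, BAR
 * index and register offsets below are hypothetical.
 */
#if 0	/* example only */
static int example_mmio_probe(struct pci_dev *pdev)
{
        void __iomem *regs;

        /* Map BAR 0 with UC- semantics; ioremap() returns NULL on failure */
        regs = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x04);       /* hypothetical enable register */
        readl(regs + 0x00);             /* read back to flush the posted write */

        iounmap(regs);
        return 0;
}
#endif
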
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncacheable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncacheable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many buses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

        return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

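/*
 * Illustrative sketch (example only): write-combining suits framebuffer
 * style memory where large streaming writes dominate. The fb_phys_addr
 * and fb_size variables below are hypothetical.
 */
#if 0	/* example only */
        void __iomem *fb = ioremap_wc(fb_phys_addr, fb_size);

        if (fb) {
                memset_io(fb, 0, fb_size);      /* bulk writes benefit from WC */
                iounmap(fb);
        }
#endif
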
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           pgprot_t prot)
{
        return __ioremap_caller(phys_addr, size,
                                pgprot2cachemode(prot),
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

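/*
 * Illustrative sketch (example only): ioremap_prot() lets a caller pass an
 * explicit pgprot_t, which is translated to a cache mode via
 * pgprot2cachemode(). Passing PAGE_KERNEL here amounts to a write-back
 * cached mapping; phys and size are hypothetical.
 */
#if 0	/* example only */
        void __iomem *p = ioremap_prot(phys, size, PAGE_KERNEL);
#endif
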
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if (WARN_ON_ONCE(!is_ioremap_addr((void __force *)addr)))
                return;

        /*
         * The PCI/ISA range special-casing was removed from __ioremap()
         * so this check, in theory, can be removed. However, there are
         * cases where iounmap() is called for addresses not obtained via
         * ioremap() (vga16fb for example). Add a warning so that these
         * cases can be caught and fixed.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
                WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
                return;
        }

        mmiotrace_iounmap(addr);

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        p = find_vm_area((void __force *)addr);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        kmsan_iounmap_page_range((unsigned long)addr,
                                 (unsigned long)addr + get_vm_area_size(p));
        memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
{
        if ((flags & MEMREMAP_DEC) || cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return (void __force *)ioremap_cache(phys_addr, size);

        return (void __force *)ioremap_encrypted(phys_addr, size);
}

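/*
 * Illustrative sketch (example only): memremap() is the usual entry point
 * that ends up here for write-back mappings; unlike ioremap(), it returns
 * a plain pointer usable with ordinary loads/stores. phys and size are
 * hypothetical.
 */
#if 0	/* example only */
        void *va = memremap(phys, size, MEMREMAP_WB);

        if (va) {
                /* regular memcpy()/memset() are fine on a WB mapping */
                memunmap(va);
        }
#endif
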
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
        unsigned long start  = phys &  PAGE_MASK;
        unsigned long offset = phys & ~PAGE_MASK;
        void *vaddr;

        /* memremap() maps if RAM, otherwise falls back to ioremap() */
        vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

        /* Only add the offset on success and return NULL if memremap() failed */
        if (vaddr)
                vaddr += offset;

        return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
        memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

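/*
 * Illustrative sketch (example only): this mirrors how a /dev/mem style
 * reader would use the xlate helpers, one page at a time. The p, buf, sz
 * and err variables are hypothetical and the surrounding read loop is
 * elided.
 */
#if 0	/* example only */
        void *ptr = xlate_dev_mem_ptr(p);

        if (ptr) {
                if (copy_to_user(buf, ptr, sz))
                        err = -EFAULT;
                unxlate_dev_mem_ptr(p, ptr);
        }
#endif
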
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted, so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
                                          unsigned long size)
{
        int is_pmem;

        /*
         * Check if the address is part of a persistent memory region.
         * This check covers areas added by E820, EFI and ACPI.
         */
        is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
                                    IORES_DESC_PERSISTENT_MEMORY);
        if (is_pmem != REGION_DISJOINT)
                return true;

        /*
         * Check if the non-volatile attribute is set for an EFI
         * reserved area.
         */
        if (efi_enabled(EFI_BOOT)) {
                switch (efi_mem_type(phys_addr)) {
                case EFI_RESERVED_TYPE:
                        if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
                                return true;
                        break;
                default:
                        break;
                }
        }

        /* Check if the address is outside kernel usable area */
        switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
        case E820_TYPE_RESERVED:
        case E820_TYPE_ACPI:
        case E820_TYPE_NVS:
        case E820_TYPE_UNUSABLE:
                /* For SEV, these areas are encrypted */
                if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                        break;
                fallthrough;

        case E820_TYPE_PRAM:
                return true;
        default:
                break;
        }

        return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr)
{
        u64 paddr;

        /* Check if the address is part of EFI boot/runtime data */
        if (!efi_enabled(EFI_BOOT))
                return false;

        paddr = boot_params.efi_info.efi_memmap_hi;
        paddr <<= 32;
        paddr |= boot_params.efi_info.efi_memmap;
        if (phys_addr == paddr)
                return true;

        paddr = boot_params.efi_info.efi_systab_hi;
        paddr <<= 32;
        paddr |= boot_params.efi_info.efi_systab;
        if (phys_addr == paddr)
                return true;

        if (efi_is_table_address(phys_addr))
                return true;

        switch (efi_mem_type(phys_addr)) {
        case EFI_BOOT_SERVICES_DATA:
        case EFI_RUNTIME_SERVICES_DATA:
                return true;
        default:
                break;
        }

        return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool __ref __memremap_is_setup_data(resource_size_t phys_addr, bool early)
{
        unsigned int setup_data_sz = sizeof(struct setup_data);
        struct setup_indirect *indirect;
        struct setup_data *data;
        u64 paddr, paddr_next;

        paddr = boot_params.hdr.setup_data;
        while (paddr) {
                unsigned int len, size;

                if (phys_addr == paddr)
                        return true;

                if (early)
                        data = early_memremap_decrypted(paddr, setup_data_sz);
                else
                        data = memremap(paddr, setup_data_sz, MEMREMAP_WB | MEMREMAP_DEC);
                if (!data) {
                        pr_warn("failed to remap setup_data entry\n");
                        return false;
                }

                size = setup_data_sz;

                paddr_next = data->next;
                len = data->len;

                if ((phys_addr > paddr) &&
                    (phys_addr < (paddr + setup_data_sz + len))) {
                        if (early)
                                early_memunmap(data, setup_data_sz);
                        else
                                memunmap(data);
                        return true;
                }

                if (data->type == SETUP_INDIRECT) {
                        size += len;
                        if (early) {
                                early_memunmap(data, setup_data_sz);
                                data = early_memremap_decrypted(paddr, size);
                        } else {
                                memunmap(data);
                                data = memremap(paddr, size, MEMREMAP_WB | MEMREMAP_DEC);
                        }
                        if (!data) {
                                pr_warn("failed to remap indirect setup_data\n");
                                return false;
                        }

                        indirect = (struct setup_indirect *)data->data;

                        if (indirect->type != SETUP_INDIRECT) {
                                paddr = indirect->addr;
                                len = indirect->len;
                        }
                }

                if (early)
                        early_memunmap(data, size);
                else
                        memunmap(data);

                if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                        return true;

                paddr = paddr_next;
        }

        return false;
}

static bool memremap_is_setup_data(resource_size_t phys_addr)
{
        return __memremap_is_setup_data(phys_addr, false);
}

static bool __init early_memremap_is_setup_data(resource_size_t phys_addr)
{
        return __memremap_is_setup_data(phys_addr, true);
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
                                 unsigned long flags)
{
        if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return true;

        if (flags & MEMREMAP_ENC)
                return true;

        if (flags & MEMREMAP_DEC)
                return false;

        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                if (memremap_is_setup_data(phys_addr) ||
                    memremap_is_efi_data(phys_addr))
                        return false;
        }

        return !memremap_should_map_decrypted(phys_addr, size);
}

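/*
 * Illustrative sketch (example only): a caller that needs a guaranteed
 * decrypted (shared) mapping can override the policy above with the
 * MEMREMAP_DEC flag; MEMREMAP_ENC forces the opposite. phys and size are
 * hypothetical.
 */
#if 0	/* example only */
        void *shared = memremap(phys, size, MEMREMAP_WB | MEMREMAP_DEC);
#endif
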
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
                                             unsigned long size,
                                             pgprot_t prot)
{
        bool encrypted_prot;

        if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return prot;

        encrypted_prot = true;

        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                if (early_memremap_is_setup_data(phys_addr) ||
                    memremap_is_efi_data(phys_addr))
                        encrypted_prot = false;
        }

        if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
                encrypted_prot = false;

        return encrypted_prot ? pgprot_encrypted(prot)
                              : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
        return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
                                      unsigned long size)
{
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protection - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
                                         unsigned long size)
{
        if (!x86_has_pat_wp())
                return NULL;
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
                                      unsigned long size)
{
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protection - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
                                         unsigned long size)
{
        if (!x86_has_pat_wp())
                return NULL;
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
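
/*
 * Illustrative sketch (example only): early boot code can use the
 * decrypted variant to read firmware-provided data, e.g. a setup_data
 * entry, before the normal memremap() machinery is available. paddr is
 * hypothetical.
 */
#if 0	/* example only */
        struct setup_data *data;

        data = early_memremap_decrypted(paddr, sizeof(*data));
        if (data) {
                pr_info("setup_data type %u, len %u\n", data->type, data->len);
                early_memunmap(data, sizeof(*data));
        }
#endif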
#endif /* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3_pa());
        pgd_t *pgd = &base[pgd_index(addr)];
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
        WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

        early_ioremap_setup();

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        /* Sanitize 'prot' against any unsupported bits: */
        pgprot_val(flags) &= __supported_pte_mask;

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        flush_tlb_one_kernel(addr);
}