arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
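
/*
 * Illustrative sketch (added; not from the original file): the PAT code in
 * kernel_map_sync_memtype() uses ioremap_change_attr() to keep the kernel's
 * linear ("direct") mapping in sync with a new ioremap alias, roughly:
 *
 *	unsigned long lin = (unsigned long)__va(phys_addr);
 *
 *	if (ioremap_change_attr(lin, size, pcm))
 *		goto fail;	// refuse aliases with conflicting attributes
 */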

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
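
/*
 * Illustrative note (added; the address is hypothetical): with the huge
 * I/O mapping support described above, a request such as
 *
 *	ioremap_nocache(0xc0000000, 4UL << 20);
 *
 * is 2MB-aligned and at least 2MB long, so ioremap_page_range() may back
 * it with 2MB pages where the CPU supports them, falling back toward 4KB
 * pages when a non-WB MTRR covers part of the range.
 */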

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
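
/*
 * Illustrative usage sketch (added; the BAR index and CTRL_REG offset are
 * hypothetical): a PCI driver typically maps a register BAR once at probe
 * time and balances the mapping with iounmap() on teardown:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */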

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
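
/*
 * Illustrative usage sketch (added; fb_base and fb_size are hypothetical):
 * write combining suits streaming writes to device memory such as a
 * framebuffer, where ordering of the individual stores does not matter:
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_size);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_size);
 */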

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
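
/*
 * Illustrative note (added): ioremap_prot() takes raw page protection bits
 * and derives the cache mode from them, so e.g. passing
 * pgprot_val(pgprot_writecombine(PAGE_KERNEL_IO)) should map the range with
 * the same cache mode that ioremap_wc() would pick.
 */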

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}
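
/*
 * Illustrative note (added): these two helpers gate the transparent huge
 * I/O mappings created by __ioremap_caller() above; pud-level (1GB)
 * mappings need gbpages support, pmd-level (2MB) mappings need PSE.
 */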

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
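
/*
 * Illustrative usage sketch (added): the /dev/mem read path in
 * drivers/char/mem.c wraps each page-sized access in this pair, roughly:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */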

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}