x86/mm: Fix newly introduced printk format warnings
[linux-2.6-block.git] / arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least one huge page.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (toward 4KB)
 * when a mapping range is covered by non-WB MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
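
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically reserves its MMIO window, maps it with ioremap_nocache(),
 * accesses registers through the mmio helpers, and tears everything
 * down with iounmap(). The base address, length and register offsets
 * below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	if (!request_mem_region(0xfed40000, 0x1000, "example-dev"))
 *		return -EBUSY;
 *
 *	regs = ioremap_nocache(0xfed40000, 0x1000);
 *	if (!regs) {
 *		release_mem_region(0xfed40000, 0x1000);
 *		return -ENOMEM;
 *	}
 *
 *	writel(0x1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *
 *	iounmap(regs);
 *	release_mem_region(0xfed40000, 0x1000);
 */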

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
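
/*
 * Usage sketch (illustrative only, not part of this file): write-combined
 * mappings suit streaming writes to linear apertures such as framebuffers,
 * where the CPU may batch stores into larger bursts. The physical address
 * and size below are hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(0xd0000000, 8 * 1024 * 1024);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, 8 * 1024 * 1024);
 *	iounmap(fb);
 */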

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}