/*
 * arch/x86/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
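
/*
 * A minimal usage sketch (illustrative only, not called from this file):
 * switching two pages of an existing mapping to write-combining. "vaddr"
 * here is a hypothetical page-aligned kernel virtual address.
 *
 *	if (ioremap_change_attr(vaddr, 2 * PAGE_SIZE, _PAGE_CACHE_MODE_WC))
 *		pr_warn("ioremap_change_attr failed\n");
 */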

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}
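
/*
 * __ioremap_check_ram() is written as a callback for
 * walk_system_ram_range(): it returns 1 as soon as any page in the range
 * is usable RAM (valid and not reserved). A sketch of the call, matching
 * its use in __ioremap_caller() below:
 *
 *	if (walk_system_ram_range(pfn, nr_pages, NULL,
 *				  __ioremap_check_ram) == 1)
 *		return NULL;	- the range contains RAM, refuse to remap
 */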

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates a kernel huge I/O mapping when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least one huge page.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (down to 4KB)
 * when the mapping range is covered by non-WB MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area; it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	/* First check whether the whole region can be identified as RAM */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
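
	/*
	 * Worked example of the alignment fixup above, assuming 4KB pages:
	 * for phys_addr = 0x10000004 and size = 0x10, last_addr = 0x10000013,
	 * so offset = 0x4, phys_addr becomes 0x10000000 and size becomes
	 * 0x1000; the caller eventually gets vaddr + 0x4 back (see ret_addr).
	 */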

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

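	/*
	 * reserve_memtype() may hand back (in new_pcm) a cache mode other
	 * than the one requested when the range already has an established
	 * memory type. Proceed with the existing mode only if
	 * is_new_memtype_allowed() deems the switch safe; otherwise bail out.
	 */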
	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
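
/*
 * A hedged usage sketch (the device, BAR layout and MY_CTRL_REG register
 * offset are hypothetical, not part of this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_CTRL_REG);
 *	...
 *	iounmap(regs);
 */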

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
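
/*
 * Write-combining is typically wanted for framebuffer-like apertures,
 * where batched CPU writes matter more than strict ordering; a hedged
 * sketch (smem_start/smem_len are the usual fbdev fields, used here
 * illustratively):
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *
 * Note the fallback above: without PAT this silently degrades to an
 * uncached mapping via ioremap_nocache().
 */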

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
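
/*
 * A minimal sketch (illustrative): ioremap_prot() lets callers pass raw
 * _PAGE_* protection bits, from which pgprot2cachemode() recovers the
 * cache mode. E.g. an explicitly cached mapping, roughly equivalent to
 * ioremap_cache():
 *
 *	p = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
 */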

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

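/*
 * Tell the generic ioremap_page_range() code whether huge I/O mappings may
 * be used, per the note above __ioremap_caller(): 1GB (pud-level) mappings
 * need the gbpages CPU feature, 2MB (pmd-level) mappings need PSE.
 */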
int arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
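
/*
 * A hedged sketch of the intended /dev/mem read path (mirroring how
 * drivers/char/mem.c uses these helpers; "p", "buf" and "sz" are
 * illustrative):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */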

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
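
/*
 * A minimal sketch of how the early fixmap machinery above is used during
 * boot (illustrative; early_ioremap()/early_iounmap() live in
 * mm/early_ioremap.c and end up calling __early_set_fixmap() for each page
 * of the mapping):
 *
 *	void __iomem *p = early_ioremap(phys, len);
 *	if (p) {
 *		... parse firmware/ACPI tables ...
 *		early_iounmap(p, len);
 *	}
 */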