/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

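/*
 * Worked example (illustrative only, the values are hypothetical): a caller
 * asking for __ioremap_caller(0xfebc1004, 0x10, ...) gets offset = 0x004,
 * phys_addr is rounded down to 0xfebc1000 and size is rounded up to one
 * page, so the vm area maps 0xfebc1000..0xfebc1fff and the returned pointer
 * is area->addr + 0x004 -- the non-page-aligned detail that the NOTE above
 * promises to hide from the caller.
 */
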
/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

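/*
 * Illustrative sketch (not part of this file): typical driver usage of the
 * API above.  The BAR index and the FOO_* register names are made up.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(FOO_ENABLE, regs + FOO_CTRL_REG);
 *	...
 *	iounmap(regs);
 */
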
/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

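/*
 * Illustrative sketch (hypothetical call site): write-combining mappings
 * suit framebuffer-style memory where streaming writes dominate, e.g. a
 * graphics driver might do
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *
 * When PAT is disabled this silently degrades to an uncached mapping via
 * ioremap_nocache(), as the code above shows.
 */
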
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
			      _PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

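/*
 * Illustrative sketch (simplified from the way the /dev/mem read path in
 * drivers/char/mem.c uses these helpers; error handling omitted): each
 * chunk is translated, copied out and released again.
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
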
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot found\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

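/*
 * Illustrative sketch (hypothetical call site): early_ioremap() is meant
 * for boot code that runs before the normal ioremap() machinery is usable,
 * e.g. peeking at a firmware table.  Every mapping must be undone with
 * early_iounmap() so the few boot-time fixmap slots do not run out.
 *
 *	hdr = early_ioremap(table_phys, sizeof(*hdr));
 *	len = hdr->length;
 *	early_iounmap(hdr, sizeof(*hdr));
 */
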
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot found\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size not consistent %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}