x86: allow number of additional hotplug CPUs to be set at compile time, V2
[linux-2.6-block.git] arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET || (system_state != SYSTEM_BOOTING &&
					is_vmalloc_addr((void *)x)));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

#endif

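The checks above encode the rule that __pa()/virt_to_phys() are only defined for addresses in the kernel image or the direct (linear) mapping, never for vmalloc space. A minimal sketch of what CONFIG_DEBUG_VIRTUAL catches (hypothetical function, relying on the headers already included at the top of this file):

static void __phys_addr_example(void)
{
	void *lin = kmalloc(64, GFP_KERNEL);	/* direct-mapped (lowmem) */
	void *vm  = vmalloc(PAGE_SIZE);		/* vmalloc space: no linear relation */

	if (lin)
		printk(KERN_INFO "kmalloc buffer at phys 0x%lx\n",
		       (unsigned long)virt_to_phys(lin));	/* OK: goes through __phys_addr() */

	/*
	 * virt_to_phys(vm) would be a bug: the address is not in the direct
	 * map, so with CONFIG_DEBUG_VIRTUAL the VIRTUAL_BUG_ON() above fires;
	 * without it you silently get a meaningless physical address.
	 */

	vfree(vm);
	kfree(lin);
}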
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

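The return value of pagerange_is_ram() is tri-state: 1 means every page in the range is RAM, 0 means none of it is, and -1 means the range mixes RAM and non-RAM pages. A short caller sketch (hypothetical helper, not part of this file):

/* Hypothetical caller: decide whether a physical range may be ioremap()ed. */
static int range_ok_for_ioremap(unsigned long start, unsigned long end)
{
	switch (pagerange_is_ram(start, end)) {
	case 0:
		return 1;	/* no RAM pages: safe to remap */
	case 1:
		return 0;	/* all RAM: must not be remapped by drivers */
	default:
		return 0;	/* mixed (-1): treat as unsafe as well */
	}
}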
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

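The same aliasing rule applies outside ioremap: if RAM that already sits in the direct mapping is going to be accessed uncached (for example by a device), its linear-map attributes have to be changed first so the two views agree. A minimal sketch using the public set_memory_*() helpers (hypothetical function, error handling trimmed; the alias should be flipped back with set_memory_wb() before the page is freed):

static void *make_uncached_buffer(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return NULL;
	/* Flip the direct-map alias to UC so it agrees with device access. */
	if (set_memory_uc(addr, 1)) {
		free_page(addr);
		return NULL;
	}
	return (void *)addr;
}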
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
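		/*
		 * Example: a _PAGE_CACHE_WC request over a range that an
		 * earlier mapping already pinned to _PAGE_CACHE_WB fails the
		 * check above and the ioremap is refused.  Any other result,
		 * e.g. UC- requested but plain UC returned, is considered
		 * compatible and simply adopted:
		 */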
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

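A worked example of the alignment handling in __ioremap_caller(), with purely illustrative numbers:

/*
 * Illustrative arithmetic, assuming PAGE_SIZE == 4096:
 *
 *   phys_addr = 0xfed00004, size = 8
 *   last_addr = 0xfed0000b
 *   offset    = phys_addr & ~PAGE_MASK                  = 0x004
 *   phys_addr &= PAGE_MASK                              = 0xfed00000
 *   size      = PAGE_ALIGN(last_addr + 1) - phys_addr   = 0x1000 (one page)
 *
 * The returned cookie is vaddr + offset, so readl()/writel() on it hit
 * 0xfed00004 even though the mapping itself starts at 0xfed00000.
 */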
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

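A typical consumer of ioremap_nocache() looks roughly like the sketch below; the driver, BAR index, register offset, and the trimmed probe/remove signatures are all hypothetical, and every successful mapping is paired with exactly one iounmap():

#include <linux/pci.h>

#define MY_BAR		0	/* hypothetical BAR index */
#define MY_REG_CTRL	0x10	/* hypothetical register offset */

static void __iomem *my_regs;

static int my_probe(struct pci_dev *pdev)
{
	my_regs = ioremap_nocache(pci_resource_start(pdev, MY_BAR),
				  pci_resource_len(pdev, MY_BAR));
	if (!my_regs)
		return -ENOMEM;

	writel(0x1, my_regs + MY_REG_CTRL);	/* uncached MMIO write */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	iounmap(my_regs);	/* exactly one iounmap per successful ioremap */
}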
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

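Write-combining is usually what you want for large, write-mostly regions such as framebuffers. A minimal sketch (hypothetical addresses and function); note that when PAT is disabled the call silently degrades to an uncached mapping via ioremap_nocache():

static void __iomem *fb;

static int map_framebuffer(unsigned long fb_phys, unsigned long fb_len)
{
	fb = ioremap_wc(fb_phys, fb_len);	/* UC fallback if !pat_enabled */
	if (!fb)
		return -ENOMEM;

	memset_io(fb, 0, fb_len);		/* streaming writes benefit from WC */
	return 0;
}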
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = (void *) __ioremap_caller(phys_addr, size, flags,
					__builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

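The /dev/mem read path pairs these two helpers roughly as in the sketch below (simplified; the function name is hypothetical and count is assumed not to cross a page boundary):

#include <linux/uaccess.h>

static ssize_t read_phys_page(unsigned long phys, char __user *buf, size_t count)
{
	void *ptr;
	ssize_t ret = count;

	ptr = xlate_dev_mem_ptr(phys);		/* __va() for RAM, ioremap otherwise */
	if (!ptr)
		return -EFAULT;

	if (copy_to_user(buf, ptr, count))
		ret = -EFAULT;

	unxlate_dev_mem_ptr(phys, ptr);		/* iounmap only in the non-RAM case */
	return ret;
}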
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

/* Remap an IO device */
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

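These early mappings come out of the small FIX_BTMAP fixmap window and nest only FIX_BTMAPS_NESTING deep, so callers map, read, and release promptly during boot. A sketch under those assumptions (hypothetical table layout and function):

struct boot_table {			/* hypothetical firmware table layout */
	u32 signature;
	u32 length;
};

static u32 __init read_boot_table_sig(unsigned long phys)
{
	struct boot_table *tbl;
	u32 sig = 0;

	tbl = early_memremap(phys, sizeof(*tbl));
	if (!tbl)
		return 0;

	sig = tbl->signature;
	early_iounmap(tbl, sizeof(*tbl));	/* release the fixmap slot(s) */

	return sig;
}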
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}