/*
 * arch/x86/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4 KB of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640 KB -> 1 MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

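/*
 * Worked example (editorial addition, hypothetical values): a request of
 * phys_addr = 0x1234 and size = 0x20 gives last_addr = 0x1253; the
 * alignment code above then yields offset = 0x234, phys_addr = 0x1000 and
 * size = PAGE_ALIGN(0x1254) - 0x1000 = 0x1000, so a single 4 KB page is
 * mapped and the caller receives vaddr + 0x234.
 */
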
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

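/*
 * Usage sketch (editorial addition, not part of the original file): a
 * typical PCI driver maps an MMIO BAR with ioremap_nocache() and tears
 * the mapping down again with iounmap(). The device, BAR index and
 * register offset below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);	(read a hypothetical register)
 *	iounmap(regs);
 */
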
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

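/*
 * Usage sketch (editorial addition): write-combining mappings are
 * typically used for framebuffer-like apertures where streaming write
 * throughput matters more than the ordering of individual stores; the
 * aperture_base/aperture_len values below are hypothetical. Note that
 * ioremap_wc() falls back to an uncached mapping when PAT is disabled.
 *
 *	void __iomem *fb = ioremap_wc(aperture_base, aperture_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, aperture_len);
 *		iounmap(fb);
 *	}
 */
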
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

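/*
 * Usage sketch (editorial addition): the /dev/mem read path conceptually
 * performs the following for each page-sized chunk, where p is the
 * physical address being read; names are simplified and error handling
 * is omitted.
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (ptr) {
 *		if (copy_to_user(buf, ptr, sz))
 *			err = -EFAULT;
 *		unxlate_dev_mem_ptr(p, ptr);
 *	}
 */
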
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
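
/*
 * Usage sketch (editorial addition): early_ioremap()/early_iounmap() are
 * meant for short-lived mappings made before the normal ioremap()
 * machinery is up, e.g. peeking at a firmware table during boot. The
 * fw_table_phys address and local_copy structure below are hypothetical.
 *
 *	void *p = early_ioremap(fw_table_phys, sizeof(local_copy));
 *
 *	if (p) {
 *		memcpy(&local_copy, p, sizeof(local_copy));
 *		early_iounmap(p, sizeof(local_copy));
 *	}
 */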

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}