x86: mmiotrace, preview 2
[linux-block.git] arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

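/*
 * Note on __phys_addr() below: on 64-bit, kernel text and static data are
 * mapped above __START_KERNEL_map while all of RAM is also linearly mapped
 * at PAGE_OFFSET, so the physical address is recovered by subtracting
 * whichever base the virtual address falls under.
 */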
#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

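/*
 * phys_addr_valid() rejects physical addresses beyond what the CPU can
 * actually decode (boot_cpu_data.x86_phys_bits); the 32-bit variant below
 * accepts everything.
 */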
static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

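/*
 * page_is_ram() reports whether a pfn lies in a usable E820_RAM range,
 * with special cases for the BIOS-owned first page and the legacy
 * 640k-1MB area. __ioremap_caller() uses it to refuse mapping RAM that
 * the kernel already owns.
 */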
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

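/*
 * Overview of __ioremap_caller() below: validate the range, refuse to map
 * pages that are normal RAM, reserve the requested cache attribute with the
 * PAT memtype tracker, carve a VM_IOREMAP area out of the vmalloc space,
 * install the page tables with ioremap_page_range(), fix up the direct
 * mapping so the same physical pages are never mapped with conflicting
 * cache attributes, and finally tell mmiotrace about the new mapping.
 */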
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
             (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
             pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(phys_addr, size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

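/*
 * Typical driver usage (illustrative sketch only; the BAR index and the
 * register offset below are made-up examples, not part of this file):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);
 *      iounmap(regs);
 */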
/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

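/*
 * ioremap_cache - map bus memory into CPU space with write-back caching.
 * Thin wrapper around __ioremap_caller() with _PAGE_CACHE_WB; must also be
 * freed with iounmap.
 */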
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

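/*
 * Everything below implements the boot-time ("early") ioremap facility for
 * 32-bit: before the normal ioremap machinery is available, mappings are
 * handed out from a small window of fixmap slots (FIX_BTMAP_BEGIN ..
 * FIX_BTMAP_END) backed by the statically allocated bm_pte page table.
 * At most FIX_BTMAPS_NESTING early mappings may be live at once, and
 * check_early_ioremap_leak() warns at late_initcall time if any were
 * never unmapped.
 */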
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

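/*
 * Once the real page tables are in place (after_paging_init), re-install
 * any still-live boot mappings with set_fixmap() so the permanent fixmap
 * takes over from bm_pte.
 */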
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

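/*
 * early_ioremap(): each nesting level gets its own window of NR_FIX_BTMAPS
 * fixmap slots starting at FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * nesting.
 * Pages are installed one slot at a time and the virtual address of the
 * first slot, plus the sub-page offset, is returned to the caller.
 */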
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */