/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif
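
/*
 * Illustrative note (not part of the original file): __phys_addr() undoes
 * both virtual views of physical memory on 64-bit. Kernel text lives at
 * __START_KERNEL_map, everything else in the direct map at PAGE_OFFSET:
 *
 *      void *p = kmalloc(16, GFP_KERNEL);
 *      unsigned long phys = __phys_addr((unsigned long)p);
 *      // equivalent to __pa(p) for a direct-map pointer
 */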

int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
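
/*
 * Illustrative example (not part of the original file): callers pass a
 * page frame number, so a physical address is shifted down first:
 *
 *      if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *              printk(KERN_INFO "0x%lx is E820 RAM\n", phys_addr);
 */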

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
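
/*
 * Illustrative note (not part of the original file): this exists because
 * one physical page must never be mapped with conflicting cache types.
 * When __ioremap() creates an uncached alias of a page that also sits in
 * the kernel direct mapping, the direct-map PTEs are switched to match:
 *
 *      // after mapping phys as UC, the direct-map alias goes UC too
 *      ioremap_change_attr((unsigned long)__va(phys), size, _PAGE_CACHE_UC);
 */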

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}
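
/*
 * Illustrative example (not part of the original file; the bus address is
 * made up): the offset handling above is what allows unaligned requests:
 *
 *      void __iomem *p = __ioremap(0xfebc1004, 8, _PAGE_CACHE_UC);
 *      // maps the whole page at 0xfebc1000, returns mapping + 0x4
 */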

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
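
/*
 * Illustrative usage (not part of the original file; CTRL_REG is a
 * made-up register offset):
 *
 *      void __iomem *regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                                           pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + CTRL_REG);
 *      iounmap(regs);
 */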

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
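
/*
 * Illustrative note (not part of the original file): the ISA early-return
 * above mirrors the special case in __ioremap(). A mapping of e.g. the VGA
 * text buffer never allocated a vm_struct, so there is nothing to tear down:
 *
 *      void __iomem *vga = ioremap_nocache(0xb8000, 0x8000);
 *      iounmap(vga);   // returns immediately, no vm_area was created
 */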

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}
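
/*
 * Illustrative note (not part of the original file): the FIX_BTMAP slots
 * all fall inside one pmd, so the single static page bm_pte backs every
 * boot-time mapping:
 *
 *      pte_t *pte = early_ioremap_pte(fix_to_virt(FIX_BTMAP_BEGIN));
 *      // pte points into bm_pte[], not into a runtime-allocated pte page
 */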

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}
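
/*
 * Illustrative note (not part of the original file): before paging_init()
 * these helpers poke bm_pte directly; afterwards they defer to the regular
 * fixmap interfaces:
 *
 *      early_set_fixmap(FIX_BTMAP_BEGIN, 0x9f000);     // hypothetical phys
 */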

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
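
/*
 * Illustrative usage (not part of the original file): 0x40e is the BIOS
 * data area slot holding the EBDA segment, a typical boot-time read:
 *
 *      void *p = early_ioremap(0x40e, 2);
 *      unsigned long ebda = *(unsigned short *)p << 4;
 *      early_iounmap(p, 2);
 */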

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */