/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (down to 4KB)
 * when a mapping range is covered by a non-WB MTRR type.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
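
/*
 * Worked example of the alignment handling above (the numbers are
 * hypothetical, chosen for illustration): a request for phys_addr
 * 0xfebc1004 with size 0x10 yields offset = 0x4, phys_addr rounded down
 * to 0xfebc1000, and size rounded up to 0x1000, so the mapping covers
 * the whole page while the caller gets back vaddr + 0x4 and never sees
 * the rounding.
 */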

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
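
/*
 * Usage sketch (hypothetical PCI driver; "pdev" and CTRL_REG are made-up
 * names, not from this file): map a BAR, talk to it through the mmio
 * helpers rather than plain pointer dereferences, and release it with
 * iounmap() when done.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */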

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncacheable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncacheable on the CPU when possible. For non-PAT
 * systems this ends up setting the page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses; in particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled())
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
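
/*
 * Usage sketch (hypothetical framebuffer driver; fb_phys and fb_len are
 * made-up names): write combining suits large, write-mostly regions such
 * as framebuffers, where batching writes matters more than strict
 * uncached ordering. Note the fallback above: without PAT this silently
 * degrades to the UC- mapping of ioremap_nocache().
 *
 *	info->screen_base = ioremap_wc(fb_phys, fb_len);
 *	if (!info->screen_base)
 *		return -ENOMEM;
 */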

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
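
/*
 * Sketch of the PCI/ISA special case handled above (illustrative only;
 * 0xa0000 lies inside the ISA range): such a mapping never had a vm_area
 * behind it, so the matching iounmap() returns early and is a no-op.
 *
 *	void __iomem *p = ioremap_nocache(0xa0000, 0x1000);
 *	iounmap(p);	// nothing to free; returns early
 */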

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If the page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success; return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
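
/*
 * Usage sketch, modelled on the /dev/mem read path (buffer and error
 * handling elided; "p", "buf" and "sz" are illustrative names): callers
 * pair the two helpers around a temporary access.
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */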

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}