x86: fix ioremap pgprot inconsistency
[linux-2.6-block.git] arch/x86/mm/ioremap_32.c
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

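/*
 * The legacy 640k-1MB ISA window is permanently mapped by the kernel, so
 * __ioremap() and iounmap() special-case physical addresses in the range
 * below instead of creating a new vm_area for them.
 */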
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
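/*
 * Illustrative use from a PCI driver (a sketch only; "mydev_regs" and the
 * BAR index are hypothetical, not part of this file):
 *
 *	void __iomem *mydev_regs;
 *
 *	mydev_regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *				     pci_resource_len(pdev, 0));
 *	if (!mydev_regs)
 *		return -ENOMEM;
 *	status = readl(mydev_regs + 0x10);
 *	...
 *	iounmap(mydev_regs);
 */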

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);


int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

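/*
 * Boot-time fixmap support: bm_pte is a single, statically allocated page
 * table that backs the FIX_BTMAP_* fixmap slots before the real kernel
 * page tables exist.  after_paging_init is set by early_ioremap_reset()
 * once the real page tables are in place, at which point the normal
 * set_fixmap()/clear_fixmap() paths are used instead.
 */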
static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
		__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

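/*
 * Hook bm_pte into swapper_pg_dir so that the FIX_BTMAP range is usable
 * before paging_init(); warns if the range would need more than one
 * page-directory entry.
 */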
void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk("pgd %p != %p\n",
			pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk("FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

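/*
 * Called once the real page tables are up: any boot-time fixmap entry
 * that is still present is re-installed with set_fixmap() so that it
 * survives the switch away from bm_pte.
 */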
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

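/*
 * Write one entry of the boot-time page table directly and flush its
 * TLB entry; a zero pgprot clears the mapping.
 */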
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
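/*
 * early_ioremap_nested counts the boot-time mappings that are currently
 * outstanding; each nesting level gets its own block of NR_FIX_BTMAPS
 * fixmap slots, up to FIX_BTMAPS_NESTING levels deep.
 */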
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

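/*
 * Map a physical range through the boot-time fixmap slots.  Only usable
 * while system_state == SYSTEM_BOOTING, limited to NR_FIX_BTMAPS pages
 * per mapping, and every call must be balanced by early_iounmap().
 */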
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk("early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
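/*
 * Typical boot-time use, e.g. peeking at a firmware table before the
 * normal ioremap() machinery is available (a sketch only; "table_phys"
 * and "hdr" are hypothetical):
 *
 *	void *hdr;
 *
 *	hdr = early_ioremap(table_phys, PAGE_SIZE);
 *	if (hdr) {
 *		...examine the table...
 *		early_iounmap(hdr, PAGE_SIZE);
 *	}
 */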
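/*
 * Referenced from fix_to_virt() when the fixmap index cannot be shown to
 * be valid at compile time; reaching it at run time indicates a bad
 * index, so just warn.
 */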
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}