#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}

void __kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
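
/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * the sleeping API above, assuming the usual kmap()/kunmap() wrappers
 * around __kmap()/__kunmap().  kmap() may sleep (see the might_sleep()
 * above), so it is only safe from process context.  The function name
 * below is hypothetical.
 */
#if 0   /* example usage only, never compiled */
static void example_copy_from_highmem(struct page *page, void *dst)
{
        void *src = kmap(page);         /* may sleep waiting for a pool slot */

        memcpy(dst, src, PAGE_SIZE);
        kunmap(page);                   /* drop the pkmap reference */
}
#endif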

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void*) vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

        /*
         * Force other mappings to Oops if they try to access
         * this pte without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
        local_flush_tlb_one(vaddr);
#endif

        pagefault_enable();
}
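
/*
 * Illustrative sketch, not part of the original file: a typical atomic
 * kmap user, assuming the usual kmap_atomic()/kunmap_atomic() wrappers
 * around the __-prefixed functions above.  The KM_USER0 slot and the
 * function name are assumptions for illustration only; no sleeping is
 * allowed between map and unmap.
 */
#if 0   /* example usage only, never compiled */
static void example_clear_highpage(struct page *page)
{
        void *vaddr = kmap_atomic(page, KM_USER0);

        memset(vaddr, 0, PAGE_SIZE);    /* short, non-sleeping work only */
        kunmap_atomic(vaddr, KM_USER0);
}
#endif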

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        debug_kmap_atomic(type);
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        flush_tlb_one(vaddr);

        return (void*) vaddr;
}
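
/*
 * Illustrative sketch, not part of the original file: reading from a page
 * frame that has no struct page (e.g. a device aperture) by raw pfn.
 * The pfn source, offset handling and KM_USER0 slot are assumptions for
 * illustration only.
 */
#if 0   /* example usage only, never compiled */
static u32 example_read_pfn_word(unsigned long pfn, unsigned int offset)
{
        void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
        u32 val = *(u32 *)(vaddr + offset);

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}
#endif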

struct page *__kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
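
/*
 * Illustrative sketch, not part of the original file: recovering the
 * struct page behind an address returned by an atomic kmap, assuming the
 * usual kmap_atomic_to_page() wrapper.  Hypothetical debugging helper.
 */
#if 0   /* example usage only, never compiled */
static void example_dump_atomic_mapping(void *kvaddr)
{
        struct page *page = kmap_atomic_to_page(kvaddr);

        printk(KERN_DEBUG "kvaddr %p maps pfn %lu\n",
               kvaddr, page_to_pfn(page));
}
#endif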

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);