Commit | Line | Data |
---|---|---|
13f876ba TG |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
/* Low-level kmap_local implementation hooks (see mm/highmem.c). */
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
/* Per-task kmap state management, called from fork/scheduler paths. */
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	/* Warn if the current task still holds outstanding kmap_local slots */
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
/* !CONFIG_KMAP_LOCAL: no per-task mapping state exists, so these are no-ops */
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
23 | ||
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* Architectures that need a TLB flush after kmap() define this; default no-op */
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

/* Default page protection for kmap mappings unless the arch overrides it */
#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

/* Highmem core implementation (mm/highmem.c) */
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
39 | ||
/*
 * Map a page into kernel address space. Sleeps when a highmem page
 * needs a pkmap slot, hence may only be called from sleepable context.
 * Lowmem pages already have a permanent kernel mapping.
 */
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	addr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
52 | ||
/*
 * Undo kmap(). Only highmem pages carry a pkmap mapping that must be
 * released; lowmem pages need no teardown.
 */
static inline void kunmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		kunmap_high(page);
}
60 | ||
/* Translate a kmap'ed virtual address back to its struct page. */
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}
65 | ||
/* Flush stale pkmap entries that are no longer in use. */
static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}
70 | ||
/* Map a page with the default protection via the kmap_local machinery. */
static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}
75 | ||
/* Same as kmap_local_page() but with caller-supplied page protection. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}
80 | ||
/* Map a raw PFN (may lack a struct page, e.g. iomap) with default protection. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
85 | ||
/* Release a mapping established by the kmap_local_*() family. */
static inline void __kunmap_local(void *vaddr)
{
	kunmap_local_indexed(vaddr);
}
90 | ||
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	/*
	 * PREEMPT_RT disables migration (the task must stay on this CPU
	 * for the mapping to remain valid) instead of preemption; non-RT
	 * keeps the historic kmap_atomic() preempt_disable() semantics.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	/* Page faults are not allowed while an atomic mapping is held */
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}
101 | ||
/* kmap_atomic_prot() with the default kernel page protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
106 | ||
/* Atomic mapping of a raw PFN; same discipline as kmap_atomic_prot(). */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	/* RT disables migration instead of preemption; see kmap_atomic_prot() */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
117 | ||
/* Tear down a kmap_atomic*() mapping — exact reverse order of setup. */
static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
127 | ||
unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

/* Number of currently free highmem pages. */
static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

/* Total number of highmem pages managed by the system. */
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}
140 | ||
/* Adjust the highmem page accounting by @count (may be negative). */
static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}
145 | ||
146 | #else /* CONFIG_HIGHMEM */ | |
147 | ||
/* Without highmem every mapping is a direct-map address. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
152 | ||
/* All pages are permanently mapped; keep might_sleep() for API parity. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
158 | ||
/* No pkmap pool exists without highmem; nothing to release or flush. */
static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }
161 | ||
static inline void kunmap(struct page *page)
{
	/* Some architectures require a cache flush when a mapping is dropped */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
168 | ||
/* Without highmem, kmap_local degenerates to the direct-map address. */
static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}
173 | ||
/* @prot is ignored: the direct map already has a fixed protection. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}
178 | ||
/* PFN variant: resolve to the struct page and use its direct mapping. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}
183 | ||
static inline void __kunmap_local(void *addr)
{
	/* Some architectures require a cache flush when a mapping is dropped */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}
190 | ||
/*
 * No highmem: the mapping is trivial, but the atomic-section semantics
 * (no migration/preemption, no page faults) are preserved for callers.
 */
static inline void *kmap_atomic(struct page *page)
{
	/* RT disables migration instead of preemption; see highmem variant */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}
200 | ||
/* @prot is ignored: the direct map already has a fixed protection. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}
205 | ||
/* PFN variant: resolve to the struct page and map atomically. */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
210 | ||
/* Tear down in exact reverse order of kmap_atomic() above. */
static inline void __kunmap_atomic(void *addr)
{
	/* Some architectures require a cache flush when a mapping is dropped */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
222 | ||
/* No highmem pool exists, so both counters are trivially zero. */
static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }
225 | ||
226 | #endif /* CONFIG_HIGHMEM */ | |
227 | ||
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
237 | ||
/* Same build-time guard as kunmap_atomic(): takes a vaddr, not a page. */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
243 | ||
13f876ba | 244 | #endif |