/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

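/*
 * Illustrative usage sketch (hypothetical helper, not part of this API):
 * the canonical kmap()/kunmap() pairing from preemptible task context.
 */
#if 0
static void kmap_usage_sketch(struct page *page)
{
	char *vaddr = kmap(page);	/* may sleep on 32bit HIGHMEM */

	vaddr[0] = 0;			/* address stays valid until kunmap() */
	kunmap(page);			/* takes the page, not the address */
}
#endif
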
/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_page(page1);
 *   addr2 = kmap_local_page(page2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings, the mapping is only valid in the context
 * of the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

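/*
 * Illustrative usage sketch (hypothetical helper, not part of this API):
 * a short-lived local mapping used to copy data out of a page which may
 * reside in highmem.
 */
#if 0
static void kmap_local_usage_sketch(struct page *page, void *buf, size_t len)
{
	char *vaddr = kmap_local_page(page);	/* valid only in this context */

	memcpy(buf, vaddr, len);
	kunmap_local(vaddr);			/* unmap in reverse map order */
}
#endif
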
/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings, the mapping is only valid in the context
 * of the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

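/*
 * Illustrative usage sketch (hypothetical helper, not part of this API):
 * reading one byte through a folio mapping. The returned address points
 * at @offset itself, not at the page base.
 */
#if 0
static u8 kmap_local_folio_usage_sketch(struct folio *folio, size_t offset)
{
	u8 *addr = kmap_local_folio(folio, offset);
	u8 val = *addr;

	kunmap_local(addr);
	return val;
}
#endif
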
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr: Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */

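/*
 * Illustrative conversion sketch (hypothetical helper, not part of this
 * API): replacing a deprecated kmap_atomic() pair with kmap_local_page().
 * Code which relied on the implicit pagefault/preemption disable must now
 * request those side effects explicitly.
 */
#if 0
static void kmap_atomic_conversion_sketch(struct page *page)
{
	char *vaddr;

	/* Deprecated pattern: */
	vaddr = kmap_atomic(page);
	/* ... pagefaults and preemption are disabled here ... */
	kunmap_atomic(vaddr);

	/* Preferred replacement: */
	vaddr = kmap_local_page(page);
	/* ... add pagefault_disable()/preempt_disable() only if needed ... */
	kunmap_local(vaddr);
}
#endif
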
/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);

	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kaddr);
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
			unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
				      unsigned start1, unsigned end1,
				      unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
				     unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
			     unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

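/*
 * Illustrative usage sketch (hypothetical helper, not part of this API):
 * zeroing everything outside the range [from, to) with a single map/unmap
 * by passing both ranges to zero_user_segments().
 */
#if 0
static void zero_user_segments_sketch(struct page *page, unsigned from,
				      unsigned to)
{
	/* Zero [0, from) and [to, PAGE_SIZE) in one pass */
	zero_user_segments(page, 0, from, to, PAGE_SIZE);
}
#endif
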
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
				struct page *src_page, size_t src_off,
				size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

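/*
 * Illustrative usage sketch (hypothetical helper and data, not part of
 * this API): bouncing a small buffer through a page with the bounded copy
 * helpers. Offset plus length must not exceed PAGE_SIZE.
 */
#if 0
static void memcpy_page_roundtrip_sketch(struct page *page)
{
	char in[8] = "sketch!";
	char out[8];

	memcpy_to_page(page, 0, in, sizeof(in));	/* flushes the dcache */
	memcpy_from_page(out, page, 0, sizeof(out));
}
#endif
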
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

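/*
 * Illustrative usage sketch (hypothetical helper, not part of this API):
 * zeroing the tail of a folio past the valid data length, as a filesystem
 * might do after a short write.
 */
#if 0
static void folio_zero_tail_sketch(struct folio *folio, size_t valid_len)
{
	folio_zero_range(folio, valid_len, folio_size(folio) - valid_len);
}
#endif
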
#endif /* _LINUX_HIGHMEM_H */