/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);
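
/*
 * Usage sketch (illustrative only, not part of this header; the helper
 * name is hypothetical): a long term mapping whose pointer must stay
 * valid across sleeping operations and may be used by other contexts.
 *
 *	void *vaddr = kmap(page);
 *
 *	write_page_to_device(vaddr);	// may sleep, pointer stays valid
 *	kunmap(page);
 */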

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
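
/*
 * Migration sketch (illustrative only): the deprecated pattern above maps
 * one-to-one onto the preferred local API. kmap_local_page() does not
 * disable pagefaults or preemption, so code which relied on those side
 * effects must add pagefault_disable()/preempt_disable() explicitly.
 *
 *	// Deprecated:                 // Preferred:
 *	vaddr = kmap_atomic(page);     vaddr = kmap_local_page(page);
 *	...                            ...
 *	kunmap_atomic(vaddr);          kunmap_local(vaddr);
 */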

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
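
/*
 * Usage sketch (illustrative only; @from and @to are hypothetical): zeroing
 * the bytes before and after a freshly written range in a single call, as
 * filesystems commonly do for partial-page writes.
 *
 *	zero_user_segments(page, 0, from, to, PAGE_SIZE);
 */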

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with #MC in source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

static inline void memcpy_from_folio(char *to, struct folio *folio,
		size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
		loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, e.g. in directory handling, to kmap a folio. This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
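
/*
 * Usage sketch (illustrative only; struct dir_entry and the lookup helper
 * are hypothetical): the directory-walk pattern this helper was made for.
 * Passing a pointer into the mapped area is fine, kunmap_local() accepts
 * any address within the mapping.
 *
 *	struct folio *folio;
 *	struct dir_entry *de = example_find_entry(dir, name, &folio);
 *
 *	if (de) {
 *		// ... read fields of *de ...
 *		folio_release_kmap(folio, de);
 *	}
 */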

static inline void unmap_and_put_page(struct page *page, void *addr)
{
	folio_release_kmap(page_folio(page), addr);
}

#endif /* _LINUX_HIGHMEM_H */