/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

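/*
 * A minimal usage sketch of the pair above (illustrative only; "page" is
 * a caller-provided struct page):
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * Note that kunmap() takes the page, while kunmap_local() and
 * kunmap_atomic() below take the mapped address instead.
 */
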
/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the
 * highmem case, it comes with restrictions on pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

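/*
 * A minimal usage sketch (illustrative only; "buf" is a caller-provided
 * buffer): map, access, unmap. kunmap_local() takes the address returned
 * by kmap_local_page(), not the page:
 *
 *	void *vaddr = kmap_local_page(page);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_local(vaddr);
 */
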
/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions on pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

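/*
 * A minimal usage sketch (illustrative only): the returned address points
 * at @offset within the folio, not at the start of the folio:
 *
 *	char *p = kmap_local_folio(folio, offset);
 *	*p = 0;
 *	kunmap_local(p);
 */
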
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like::
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
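
/*
 * A usage sketch for the wrappers above (illustrative only; "bytes" is
 * the caller's count of valid bytes): zero the rest of a page after a
 * short read so stale data is not exposed:
 *
 *	zero_user_segment(page, bytes, PAGE_SIZE);
 */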

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If the architecture supports machine check exception handling, define
 * the #MC versions of copy_user_highpage and copy_highpage. They copy a
 * memory page with a #MC in the source page (@from) handled, and return
 * the number of bytes not copied if there was a #MC, otherwise 0 for
 * success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif
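
/*
 * A usage sketch (illustrative only): callers of the #MC variants must
 * check the return value, since a non-zero result means the source page
 * contained an uncorrected memory error and the copy is incomplete:
 *
 *	if (copy_mc_highpage(dst, src))
 *		return -EHWPOISON;
 */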

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

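/*
 * A usage sketch for the two helpers above (illustrative only; "buf",
 * "offset" and "len" are caller-provided, with offset + len <= PAGE_SIZE):
 *
 *	memcpy_from_page(buf, page, offset, len);	(page to buf)
 *	memcpy_to_page(page, offset, buf, len);		(buf to page)
 */
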
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
		size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}
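
/*
 * A usage sketch for the folio copy helpers above (illustrative only;
 * offset + len must not exceed folio_size(folio)). Both helpers loop
 * page by page on HIGHMEM, so callers need not split the range:
 *
 *	memcpy_to_folio(folio, offset, src, len);
 *	memcpy_from_folio(dst, folio, offset, len);
 */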

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function. If you do not have the
 * folio kmapped (e.g. the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
		size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}
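
/*
 * A usage sketch (illustrative only; "data" and "size" are caller-provided,
 * with size assumed to fit in the first page on HIGHMEM): write some bytes
 * at the start of a mapped folio, then zero and flush the remainder:
 *
 *	char *kaddr = kmap_local_folio(folio, 0);
 *	memcpy(kaddr, data, size);
 *	kaddr = folio_zero_tail(folio, size, kaddr + size);
 *	kunmap_local(kaddr);
 */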

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them. It supports large folios even on
 * HIGHMEM configurations.
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}
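
/*
 * A usage sketch (illustrative only): an inline-data filesystem filling a
 * folio in its read_folio method; "idata" and "isize" stand in for the
 * inode's inline data and its length (hypothetical names):
 *
 *	folio_fill_tail(folio, 0, idata, isize);
 *	folio_mark_uptodate(folio);
 */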

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
		loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
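
/*
 * A usage sketch (illustrative only): since the return value may be short
 * on HIGHMEM, callers copying more than a page loop until done (assuming
 * pos + len stays within the folio):
 *
 *	size_t copied = 0;
 *
 *	while (copied < len)
 *		copied += memcpy_from_file_folio(buf + copied, folio,
 *						 pos + copied, len - copied);
 */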

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, e.g. in directory handling, to kmap a folio. This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}

static inline void unmap_and_put_page(struct page *page, void *addr)
{
	folio_release_kmap(page_folio(page), addr);
}
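
/*
 * A usage sketch (illustrative only): take a reference via the page cache,
 * map the folio, inspect it, then release both with one call:
 *
 *	struct folio *folio = read_mapping_folio(mapping, index, NULL);
 *	char *kaddr;
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	kaddr = kmap_local_folio(folio, 0);
 *	... examine directory entries ...
 *	folio_release_kmap(folio, kaddr);
 */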

#endif /* _LINUX_HIGHMEM_H */