/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"
/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);
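/*
 * Illustrative sketch only (the helper name and the caller-supplied buffer
 * are assumptions, not part of this header): copy a possibly-highmem page
 * into a buffer via a long term mapping. Must run in preemptible task
 * context because kmap() may sleep. Note that kunmap() takes the page,
 * not the returned address::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_copy_page_out(struct page *page, char *buf)
 *	{
 *		char *vaddr = kmap(page);
 *
 *		memcpy(buf, vaddr, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */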
/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);
/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);
/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
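/*
 * Illustrative sketch only (the function name is an assumption): zero a
 * possibly-highmem page via a short-lived local mapping. Unlike kmap(),
 * this is safe in any context, but the returned address must not leave
 * the calling context::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_zero_page(struct page *page)
 *	{
 *		void *vaddr = kmap_local_page(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_local(vaddr);
 *	}
 */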
/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
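/*
 * Illustrative sketch only (names are assumptions): map and read the byte
 * at file position @pos within a folio. The returned address points at
 * @offset, but the mapping covers a single page, not the whole folio::
 *
 *	// hypothetical caller, not a kernel API
 *	static u8 example_read_byte(struct folio *folio, loff_t pos)
 *	{
 *		size_t offset = offset_in_folio(folio, pos);
 *		u8 *kaddr = kmap_local_folio(folio, offset);
 *		u8 val = *kaddr;
 *
 *		kunmap_local(kaddr);
 *		return val;
 *	}
 */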
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like::
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
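/*
 * For new code, the nested page copy shown above would be written with the
 * local map API instead (illustrative sketch only, same nesting rule)::
 *
 *	vaddr1 = kmap_local_page(page1);
 *	vaddr2 = kmap_local_page(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_local(vaddr2);
 *	kunmap_local(vaddr1);
 */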
/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif
#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif
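/*
 * Illustrative sketch only (a simplified anonymous-fault style caller;
 * the function name and surrounding logic are assumptions): allocate a
 * zeroed folio for a faulting address and report OOM on failure::
 *
 *	// hypothetical caller, not a kernel API
 *	static vm_fault_t example_alloc(struct vm_fault *vmf)
 *	{
 *		struct folio *folio;
 *
 *		folio = vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
 *		if (!folio)
 *			return VM_FAULT_OOM;
 *		// ... insert the folio into the page tables ...
 *		return 0;
 *	}
 */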
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}
static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}
#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif
/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
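/*
 * Illustrative sketch only (the function name is an assumption): after a
 * short write into a page, zero the tail so stale data past @bytes does
 * not leak::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_zero_tail(struct page *page, unsigned int bytes)
 *	{
 *		if (bytes < PAGE_SIZE)
 *			zero_user(page, bytes, PAGE_SIZE - bytes);
 *	}
 */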
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif
#ifdef copy_mc_to_kernel
/*
 * If the architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with a #MC in the source page (@from) handled, and return the number
 * of bytes not copied if a #MC occurred, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif
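/*
 * Illustrative sketch only (the function name is an assumption): callers
 * must check the return value, since a non-zero result means the source
 * page triggered a machine check and the destination is incomplete::
 *
 *	// hypothetical caller, not a kernel API
 *	static int example_migrate_copy(struct page *dst, struct page *src)
 *	{
 *		if (copy_mc_highpage(dst, src))
 *			return -EHWPOISON;
 *		return 0;
 *	}
 */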
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}
static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}
static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
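/*
 * Illustrative sketch only (the function name is an assumption): these
 * helpers bundle the map/copy/flush/unmap sequence, so moving data between
 * a kernel buffer and a possibly-highmem page is a single call each way.
 * @len must not exceed PAGE_SIZE::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_roundtrip(struct page *page, char *buf, size_t len)
 *	{
 *		memcpy_to_page(page, 0, buf, len);	// map, copy in, flush, unmap
 *		memcpy_from_page(buf, page, 0, len);	// map, copy out, unmap
 *	}
 */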
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
					    loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
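/*
 * Illustrative sketch only (the function name is an assumption, and the
 * range is assumed to lie entirely within @folio): because the copy may be
 * short on HIGHMEM configurations, callers needing a full range must loop
 * on the returned length::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_read_range(char *to, struct folio *folio,
 *				       loff_t pos, size_t len)
 *	{
 *		while (len) {
 *			size_t n = memcpy_from_file_folio(to, folio, pos, len);
 *
 *			to += n;
 *			pos += n;
 *			len -= n;
 *		}
 *	}
 */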
/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}
/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}
/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
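/*
 * Illustrative sketch only (the function name is an assumption): zero
 * everything from a given offset to the end of the folio, e.g. the part
 * of a folio beyond a new EOF::
 *
 *	// hypothetical caller, not a kernel API
 *	static void example_zero_from(struct folio *folio, size_t from)
 *	{
 *		folio_zero_range(folio, from, folio_size(folio) - from);
 *	}
 */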
static inline void put_and_unmap_page(struct page *page, void *addr)
{
	kunmap_local(addr);
	put_page(page);
}
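/*
 * Illustrative sketch only (the function name and caller shape are
 * assumptions): pairs with code which takes a page reference and then maps
 * it, releasing both the mapping and the reference in one call::
 *
 *	// hypothetical caller, not a kernel API
 *	static int example_peek(struct address_space *mapping, pgoff_t index)
 *	{
 *		struct page *page = read_mapping_page(mapping, index, NULL);
 *		char *kaddr;
 *		int c;
 *
 *		if (IS_ERR(page))
 *			return PTR_ERR(page);
 *		kaddr = kmap_local_page(page);
 *		c = kaddr[0];
 *		put_and_unmap_page(page, kaddr);
 *		return c;
 *	}
 */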
#endif /* _LINUX_HIGHMEM_H */