/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
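
/*
 * Example (editorial sketch, not part of the original documentation): a
 * long term mapping which may be held across sleeps and handed to other
 * contexts. The page pointer is assumed to be provided by the caller.
 *
 *	void *vaddr = kmap(page);	// may sleep on 32bit HIGHMEM
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);			// takes the page, not the address
 */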

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
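
/*
 * Example (editorial sketch): a short, strictly scoped access to a possibly
 * highmem page. 'buf', 'offset' and 'len' are assumed caller-provided and
 * in range; the mapping must not outlive the current context.
 *
 *	char *vaddr = kmap_local_page(page);
 *
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap_local(vaddr);		// takes the returned address
 */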

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
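
/*
 * Example (editorial sketch): mapping one page of a possibly large folio.
 * 'pos' is an assumed byte position; the returned address points at that
 * byte within the folio, not at the start of the containing page.
 *
 *	size_t offset = offset_in_folio(folio, pos);
 *	char *vaddr = kmap_local_folio(folio, offset);
 *
 *	*vaddr = 0;
 *	kunmap_local(vaddr);
 */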

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
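
/*
 * Example (editorial sketch): the kmap_local_page() equivalent of the
 * deprecated pattern above. If the old code relied on the implicit
 * pagefault/preemption disable, that now has to be done explicitly.
 *
 *	void *vaddr = kmap_local_page(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_local(vaddr);
 */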

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
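
/*
 * Example (editorial sketch): a typical anonymous fault path use, where the
 * freshly allocated page must be movable. 'vma' and 'vmf' are assumed to
 * come from the surrounding fault handler.
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */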

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	u8 tag;

	tag = page_kasan_tag(page);
	page_kasan_tag_reset(page);
	clear_highpage(page);
	page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
		unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
		unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
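
/*
 * Example (editorial sketch): zeroing the tail of a page after a short
 * read, where 'bytes' is the assumed count of valid data in the page.
 *
 *	if (bytes < PAGE_SIZE)
 *		zero_user(page, bytes, PAGE_SIZE - bytes);
 */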

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
		struct page *src_page, size_t src_off,
		size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}
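
/*
 * Example (editorial sketch): copying between two possibly highmem pages
 * without managing the temporary mappings by hand. 'len' is an assumed
 * length which must keep both ranges within PAGE_SIZE.
 *
 *	memcpy_page(dst_page, 0, src_page, 0, len);
 */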

static inline void memset_page(struct page *page, size_t offset, int val,
		size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
		size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
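
/*
 * Example (editorial sketch): bouncing a kernel buffer through a page with
 * the two helpers above. 'buf' and 'len' are assumed; the page is only
 * mapped for the duration of each copy.
 *
 *	memcpy_to_page(page, 0, buf, len);	// copies in, flushes dcache
 *	...
 *	memcpy_from_page(buf, page, 0, len);	// copies back out
 */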

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
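
/*
 * Example (editorial sketch): zeroing a folio from an assumed offset
 * 'from' to its end, e.g. for the range beyond EOF during truncation.
 *
 *	folio_zero_range(folio, from, folio_size(folio) - from);
 */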

#endif /* _LINUX_HIGHMEM_H */