/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				struct pipe_inode_info *pipe;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

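/*
 * Example (illustrative sketch, not part of the kernel source): walking an
 * iovec/ubuf-backed iterator one segment at a time with the helpers above.
 * consume_user_buf() is a hypothetical callback that consumes up to @len
 * bytes starting at the given user address and returns how many it took.
 *
 *	while (iov_iter_count(iter)) {
 *		size_t len = iter_iov_len(iter);
 *		ssize_t nr = consume_user_buf(iter_iov_addr(iter), len);
 *
 *		if (nr <= 0)
 *			break;
 *		iov_iter_advance(iter, nr);
 *		if (nr < len)
 *			break;	// short consumption - stop early
 *	}
 */
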
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

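/*
 * Example (illustrative sketch): the usual save/restore pattern.  A caller
 * snapshots the iterator before an operation that may consume part of it,
 * and rolls back with iov_iter_restore() (declared further down) if the
 * operation has to be retried.  do_some_io() is a hypothetical stand-in.
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_some_io(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		ret = do_some_io(iter);	// e.g. retry in a blocking context
 *	}
 */
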
static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

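/*
 * Example (illustrative sketch): a read-style path copying page/folio data
 * out to whatever the iterator describes.  copy_page_to_iter() and
 * copy_folio_to_iter() return the number of bytes actually copied; a short
 * copy is commonly turned into -EFAULT for user-backed iterators.
 *
 *	copied = copy_folio_to_iter(folio, offset, len, iter);
 *	if (copied < len)
 *		return -EFAULT;	// or account for the partial copy
 */
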
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

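/*
 * Example (illustrative sketch): reading a fixed-size header out of an
 * iterator.  copy_from_iter_full() either copies all of the requested bytes
 * and advances the iterator, or copies nothing (any partial copy is
 * reverted) and returns false.  struct foo_hdr is a hypothetical type.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */
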
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() compared to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
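/*
 * Example (illustrative sketch): wrapping a kernel buffer in an iterator.
 * Data will be copied *from* this kvec, so the direction is ITER_SOURCE;
 * use ITER_DEST when the iterator is the destination of a copy.  buf and
 * len stand for the caller's kernel buffer and its size.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);
 *	// hand &iter to an interface that consumes the data
 */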
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start,
			iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
			struct page ***pages, size_t maxsize, size_t *start,
			iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing it a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

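/*
 * Example (illustrative sketch): the common syscall-side pattern for
 * import_iovec().  On success *iovp either points at a kmalloc'ed array
 * that must be kfree'd, or is set to NULL when the on-stack fast array
 * was big enough - kfree(NULL) is harmless, so the cleanup is the same.
 * uvec and nr_segs come from userspace.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use 'iter'; 'ret' is the total number of bytes covered ...
 *	kfree(iov);
 */
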
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
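/*
 * Example (illustrative sketch): a single user buffer needs no iovec array
 * at all - wrap it directly.  Here the buffer is the destination of a copy
 * (a read-style operation), hence ITER_DEST.  ubuf, kbuf and len stand for
 * the caller's user pointer, kernel buffer and size.
 *
 *	struct iov_iter to;
 *
 *	iov_iter_ubuf(&to, ITER_DEST, ubuf, len);
 *	if (copy_to_iter(kbuf, len, &to) != len)
 *		return -EFAULT;
 */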
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin. This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
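/*
 * Example (illustrative sketch): extracting pages for I/O and releasing
 * them afterwards.  Whether each extracted page carries a pin (and so needs
 * unpin_user_page()) is decided by iov_iter_extract_will_pin() on the
 * iterator the pages came from; maxsize is a caller-chosen byte limit.
 *
 *	struct page *pages[8], **plist = pages;
 *	size_t off;
 *	ssize_t len;
 *	int n, npages;
 *
 *	len = iov_iter_extract_pages(iter, &plist, maxsize, 8, 0, &off);
 *	if (len <= 0)
 *		return len;
 *	npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
 *	// ... do the I/O against pages[0..npages-1] starting at 'off' ...
 *	if (iov_iter_extract_will_pin(iter))
 *		for (n = 0; n < npages; n++)
 *			unpin_user_page(pages[n]);
 */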

#endif