/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool copy_mc;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				struct pipe_inode_info *pipe;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
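
/*
 * Usage sketch (illustrative; do_segment() is hypothetical): consume the
 * current segment via the accessors above, then advance past it:
 *
 *	size_t n = min_t(size_t, iter_iov_len(iter), want);
 *
 *	do_segment(iter_iov_addr(iter), n);
 *	iov_iter_advance(iter, n);
 */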

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
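
/*
 * Usage sketch (illustrative; do_partial_io() is hypothetical): snapshot
 * the iterator so a failed attempt can be rolled back with
 * iov_iter_restore(), declared further down:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	if (do_partial_io(iter) < 0)
 *		iov_iter_restore(iter, &state);
 */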

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
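
/*
 * Sketch (illustrative): iovec arrays built by import_iovec() below have
 * had their segment lengths validated, so a later sum is safe:
 *
 *	total = iov_length(iov, nr_segs);
 */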

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
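
/*
 * Usage sketch (illustrative; struct foo_hdr is hypothetical): pull a
 * fixed-size header out of a write request; on a short copy the iterator
 * has already been reverted, so the caller can simply fail:
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */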

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on _copy_from_iter_flushcache() having
 * stricter semantics than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
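
/*
 * Sketch (illustrative) of the check the note above asks for:
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;
 *	rem = _copy_from_iter_flushcache(addr, bytes, i);
 */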

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif
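
/*
 * Usage sketch (illustrative): copying out of memory that may carry
 * machine-check poison; like _copy_to_iter() it returns the number of
 * bytes copied, which may be short if poison is hit (without
 * CONFIG_ARCH_HAS_COPY_MC it is just a plain copy):
 *
 *	copied = _copy_mc_to_iter(addr, bytes, i);
 *	if (copied != bytes)
 *		return -EIO;	// hypothetical error policy
 */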

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start,
			   iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
				 struct page ***pages, size_t maxsize, size_t *start,
				 iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
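
/*
 * Usage sketch (illustrative): grabbing up to a page's worth of an
 * iterator for I/O; the "2" variant advances the iterator, returns the
 * byte count, and sets *start to the offset into the first page:
 *
 *	struct page *pages[8];
 *	size_t start;
 *	ssize_t got;
 *
 *	got = iov_iter_get_pages2(i, pages, PAGE_SIZE, ARRAY_SIZE(pages), &start);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 */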

int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
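
/*
 * Usage sketch (illustrative; do_read() is hypothetical): a readv()-style
 * path builds its iterator from userspace, remembering that import_iovec()
 * may replace *iovp with a kmalloc()'d array the caller must free:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */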

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
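
/*
 * Usage sketch (illustrative): wrapping a single user buffer for a read,
 * with no iovec array needed:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
 */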

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to copy the page for the child (the parent must retain the original page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
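
/*
 * Usage sketch (illustrative): pairing extraction with cleanup; pages are
 * unpinned only if the helper above says they were pinned:
 *
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, npages);
 */

#endif /* __LINUX_UIO_H */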