1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 /* covers ubuf and kbuf alike */
20 #define iterate_buf(i, n, base, len, off, __p, STEP) { \
21 size_t __maybe_unused off = 0; \
23 base = __p + i->iov_offset; \
25 i->iov_offset += len; \
29 /* covers iovec and kvec alike */
30 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
32 size_t skip = i->iov_offset; \
34 len = min(n, __p->iov_len - skip); \
36 base = __p->iov_base + skip; \
41 if (skip < __p->iov_len) \
47 i->iov_offset = skip; \
51 #define iterate_bvec(i, n, base, len, off, p, STEP) { \
53 unsigned skip = i->iov_offset; \
55 unsigned offset = p->bv_offset + skip; \
57 void *kaddr = kmap_local_page(p->bv_page + \
58 offset / PAGE_SIZE); \
59 base = kaddr + offset % PAGE_SIZE; \
60 len = min(min(n, (size_t)(p->bv_len - skip)), \
61 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
63 kunmap_local(kaddr); \
67 if (skip == p->bv_len) { \
75 i->iov_offset = skip; \
79 #define iterate_xarray(i, n, base, len, __off, STEP) { \
82 struct folio *folio; \
83 loff_t start = i->xarray_start + i->iov_offset; \
84 pgoff_t index = start / PAGE_SIZE; \
85 XA_STATE(xas, i->xarray, index); \
87 len = PAGE_SIZE - offset_in_page(start); \
89 xas_for_each(&xas, folio, ULONG_MAX) { \
92 if (xas_retry(&xas, folio)) \
94 if (WARN_ON(xa_is_value(folio))) \
96 if (WARN_ON(folio_test_hugetlb(folio))) \
98 offset = offset_in_folio(folio, start + __off); \
99 while (offset < folio_size(folio)) { \
100 base = kmap_local_folio(folio, offset); \
103 kunmap_local(base); \
107 if (left || n == 0) \
115 i->iov_offset += __off; \
119 #define __iterate_and_advance(i, n, base, len, off, I, K) { \
120 if (unlikely(i->count < n)) \
123 if (likely(iter_is_ubuf(i))) { \
126 iterate_buf(i, n, base, len, off, \
128 } else if (likely(iter_is_iovec(i))) { \
129 const struct iovec *iov = iter_iov(i); \
132 iterate_iovec(i, n, base, len, off, \
134 i->nr_segs -= iov - iter_iov(i); \
136 } else if (iov_iter_is_bvec(i)) { \
137 const struct bio_vec *bvec = i->bvec; \
140 iterate_bvec(i, n, base, len, off, \
142 i->nr_segs -= bvec - i->bvec; \
144 } else if (iov_iter_is_kvec(i)) { \
145 const struct kvec *kvec = i->kvec; \
148 iterate_iovec(i, n, base, len, off, \
150 i->nr_segs -= kvec - i->kvec; \
152 } else if (iov_iter_is_xarray(i)) { \
155 iterate_xarray(i, n, base, len, off, \
161 #define iterate_and_advance(i, n, base, len, off, I, K) \
162 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
164 static int copyout(void __user *to, const void *from, size_t n)
166 if (should_fail_usercopy())
168 if (access_ok(to, n)) {
169 instrument_copy_to_user(to, from, n);
170 n = raw_copy_to_user(to, from, n);
175 static int copyout_nofault(void __user *to, const void *from, size_t n)
179 if (should_fail_usercopy())
182 res = copy_to_user_nofault(to, from, n);
184 return res < 0 ? n : res;
187 static int copyin(void *to, const void __user *from, size_t n)
191 if (should_fail_usercopy())
193 if (access_ok(from, n)) {
194 instrument_copy_from_user_before(to, from, n);
195 res = raw_copy_from_user(to, from, n);
196 instrument_copy_from_user_after(to, from, n, res);
202 static bool sanity(const struct iov_iter *i)
204 struct pipe_inode_info *pipe = i->pipe;
205 unsigned int p_head = pipe->head;
206 unsigned int p_tail = pipe->tail;
207 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
208 unsigned int i_head = i->head;
211 if (i->last_offset) {
212 struct pipe_buffer *p;
213 if (unlikely(p_occupancy == 0))
214 goto Bad; // pipe must be non-empty
215 if (unlikely(i_head != p_head - 1))
216 goto Bad; // must be at the last buffer...
218 p = pipe_buf(pipe, i_head);
219 if (unlikely(p->offset + p->len != abs(i->last_offset)))
220 goto Bad; // ... at the end of segment
222 if (i_head != p_head)
223 goto Bad; // must be right after the last buffer
227 printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
228 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
229 p_head, p_tail, pipe->ring_size);
230 for (idx = 0; idx < pipe->ring_size; idx++)
231 printk(KERN_ERR "[%p %p %d %d]\n",
232 pipe->bufs[idx].ops,
233 pipe->bufs[idx].page,
234 pipe->bufs[idx].offset,
235 pipe->bufs[idx].len);
240 #define sanity(i) true
243 static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
245 struct page *page = alloc_page(GFP_USER);
247 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
248 *buf = (struct pipe_buffer) {
249 .ops = &default_pipe_buf_ops,
258 static void push_page(struct pipe_inode_info *pipe, struct page *page,
259 unsigned int offset, unsigned int size)
261 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
262 *buf = (struct pipe_buffer) {
263 .ops = &page_cache_pipe_buf_ops,
271 static inline int last_offset(const struct pipe_buffer *buf)
273 if (buf->ops == &default_pipe_buf_ops)
274 return buf->len; // buf->offset is 0 for those
276 return -(buf->offset + buf->len);
279 static struct page *append_pipe(struct iov_iter *i, size_t size,
282 struct pipe_inode_info *pipe = i->pipe;
283 int offset = i->last_offset;
284 struct pipe_buffer *buf;
287 if (offset > 0 && offset < PAGE_SIZE) {
288 // some space in the last buffer; add to it
289 buf = pipe_buf(pipe, pipe->head - 1);
290 size = min_t(size_t, size, PAGE_SIZE - offset);
292 i->last_offset += size;
297 // OK, we need a new buffer
299 size = min_t(size_t, size, PAGE_SIZE);
300 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
302 page = push_anon(pipe, size);
305 i->head = pipe->head - 1;
306 i->last_offset = size;
311 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
314 struct pipe_inode_info *pipe = i->pipe;
315 unsigned int head = pipe->head;
317 if (unlikely(bytes > i->count))
320 if (unlikely(!bytes))
326 if (offset && i->last_offset == -offset) { // could we merge it?
327 struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
328 if (buf->page == page) {
330 i->last_offset -= bytes;
335 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
338 push_page(pipe, page, offset, bytes);
339 i->last_offset = -(offset + bytes);
346 * fault_in_iov_iter_readable - fault in iov iterator for reading
348 * @size: maximum length
350 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
351 * @size. For each iovec, fault in each page that constitutes the iovec.
353 * Returns the number of bytes not faulted in (like copy_to_user() and copy_from_user()).
356 * Always returns 0 for non-userspace iterators.
358 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
360 if (iter_is_ubuf(i)) {
361 size_t n = min(size, iov_iter_count(i));
362 n -= fault_in_readable(i->ubuf + i->iov_offset, n);
364 } else if (iter_is_iovec(i)) {
365 size_t count = min(size, iov_iter_count(i));
366 const struct iovec *p;
370 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
371 size_t len = min(count, p->iov_len - skip);
376 ret = fault_in_readable(p->iov_base + skip, len);
385 EXPORT_SYMBOL(fault_in_iov_iter_readable);
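/*
 * A minimal usage sketch, not part of this file: write paths commonly pair
 * this with a copy that runs with page faults disabled, retrying until no
 * further progress can be made. example_copy_atomic() is a hypothetical
 * stand-in for something like copy_page_from_iter_atomic() under locks.
 */
static ssize_t example_buffered_write(struct iov_iter *from)
{
	ssize_t total = 0;

	while (iov_iter_count(from)) {
		size_t copied;

		if (unlikely(fault_in_iov_iter_readable(from,
				iov_iter_count(from)) == iov_iter_count(from)))
			break;		/* nothing could be faulted in */
		copied = example_copy_atomic(from);	/* hypothetical; may be short */
		if (!copied)
			break;
		total += copied;
	}
	return total ? total : -EFAULT;
}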
388 * fault_in_iov_iter_writeable - fault in iov iterator for writing
390 * @size: maximum length
392 * Faults in the iterator using get_user_pages(), i.e., without triggering
393 * hardware page faults. This is primarily useful when we already know that
394 * some or all of the pages in @i aren't in memory.
396 * Returns the number of bytes not faulted in, like copy_to_user() and copy_from_user().
399 * Always returns 0 for non-user-space iterators.
401 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
403 if (iter_is_ubuf(i)) {
404 size_t n = min(size, iov_iter_count(i));
405 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
407 } else if (iter_is_iovec(i)) {
408 size_t count = min(size, iov_iter_count(i));
409 const struct iovec *p;
413 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
414 size_t len = min(count, p->iov_len - skip);
419 ret = fault_in_safe_writeable(p->iov_base + skip, len);
428 EXPORT_SYMBOL(fault_in_iov_iter_writeable);
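/*
 * Sketch of the usual read-side caller (an assumption, not from this file):
 * filesystems that cannot take page faults under their own locks return
 * -EFAULT from the locked section, fault the destination in writeable
 * outside the locks, and retry. example_read_locked() is hypothetical.
 */
static ssize_t example_locked_read(struct iov_iter *to)
{
	ssize_t ret;

	do {
		ret = example_read_locked(to);	/* page faults disabled inside */
	} while (ret == -EFAULT &&
		 fault_in_iov_iter_writeable(to, iov_iter_count(to)) !=
		 iov_iter_count(to));
	return ret;
}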
430 void iov_iter_init(struct iov_iter *i, unsigned int direction,
431 const struct iovec *iov, unsigned long nr_segs,
434 WARN_ON(direction & ~(READ | WRITE));
435 *i = (struct iov_iter) {
436 .iter_type = ITER_IOVEC,
439 .data_source = direction,
446 EXPORT_SYMBOL(iov_iter_init);
448 // returns the offset in partial buffer (if any)
449 static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
451 struct pipe_inode_info *pipe = i->pipe;
452 int used = pipe->head - pipe->tail;
453 int off = i->last_offset;
455 *npages = max((int)pipe->max_usage - used, 0);
457 if (off > 0 && off < PAGE_SIZE) { // anon and not full
464 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
467 unsigned int off, chunk;
469 if (unlikely(bytes > i->count))
471 if (unlikely(!bytes))
477 for (size_t n = bytes; n; n -= chunk) {
478 struct page *page = append_pipe(i, n, &off);
479 chunk = min_t(size_t, n, PAGE_SIZE - off);
482 memcpy_to_page(page, off, addr, chunk);
488 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
489 __wsum sum, size_t off)
491 __wsum next = csum_partial_copy_nocheck(from, to, len);
492 return csum_block_add(sum, next, off);
495 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
496 struct iov_iter *i, __wsum *sump)
500 unsigned int chunk, r;
502 if (unlikely(bytes > i->count))
504 if (unlikely(!bytes))
511 struct page *page = append_pipe(i, bytes, &r);
516 chunk = min_t(size_t, bytes, PAGE_SIZE - r);
517 p = kmap_local_page(page);
518 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
527 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
529 if (WARN_ON_ONCE(i->data_source))
531 if (unlikely(iov_iter_is_pipe(i)))
532 return copy_pipe_to_iter(addr, bytes, i);
533 if (user_backed_iter(i))
535 iterate_and_advance(i, bytes, base, len, off,
536 copyout(base, addr + off, len),
537 memcpy(base, addr + off, len)
542 EXPORT_SYMBOL(_copy_to_iter);
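/*
 * Sketch of a typical consumer, not part of this file: a ->read_iter()
 * style handler pushing a kernel buffer at the iterator. copy_to_iter()
 * is the size-checked wrapper around _copy_to_iter() from <linux/uio.h>.
 */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	static const char msg[] = "hello, iterator\n";
	size_t pos = iocb->ki_pos, n;

	if (pos >= sizeof(msg))
		return 0;
	n = copy_to_iter(msg + pos, sizeof(msg) - pos, to);
	iocb->ki_pos += n;	/* @to has been advanced by the same amount */
	return n;
}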
544 #ifdef CONFIG_ARCH_HAS_COPY_MC
545 static int copyout_mc(void __user *to, const void *from, size_t n)
547 if (access_ok(to, n)) {
548 instrument_copy_to_user(to, from, n);
549 n = copy_mc_to_user((__force void *) to, from, n);
554 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
558 unsigned int off, chunk;
560 if (unlikely(bytes > i->count))
562 if (unlikely(!bytes))
569 struct page *page = append_pipe(i, bytes, &off);
575 chunk = min_t(size_t, bytes, PAGE_SIZE - off);
576 p = kmap_local_page(page);
577 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
583 iov_iter_revert(i, rem);
591 * _copy_mc_to_iter - copy to iter with source memory error exception handling
592 * @addr: source kernel address
593 * @bytes: total transfer length
594 * @i: destination iterator
596 * The pmem driver deploys this for the dax operation
597 * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
598 * block layer). Upon #MC, read(2) aborts and returns EIO or the bytes
599 * successfully copied.
601 * The main differences between this and the typical _copy_to_iter() are:
603 * * Typical tail/residue handling after a fault retries the copy
604 * byte-by-byte until the fault happens again. Re-triggering machine
605 * checks is potentially fatal so the implementation uses source
606 * alignment and poison alignment assumptions to avoid re-triggering
607 * hardware exceptions.
609 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
610 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
613 * Return: number of bytes copied (may be %0)
615 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
617 if (WARN_ON_ONCE(i->data_source))
619 if (unlikely(iov_iter_is_pipe(i)))
620 return copy_mc_pipe_to_iter(addr, bytes, i);
621 if (user_backed_iter(i))
623 __iterate_and_advance(i, bytes, base, len, off,
624 copyout_mc(base, addr + off, len),
625 copy_mc_to_kernel(base, addr + off, len)
630 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
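/*
 * Sketch (an assumption, kept under the same ifdef since it needs
 * _copy_mc_to_iter): how a dax read path might consume the return value.
 * A short copy means poison was encountered in the source.
 */
static ssize_t example_dax_read(void *kaddr, size_t len, struct iov_iter *to)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, to);

	if (!copied && len)
		return -EIO;	/* #MC before any progress was made */
	return copied;
}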
631 #endif /* CONFIG_ARCH_HAS_COPY_MC */
633 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
635 if (WARN_ON_ONCE(!i->data_source))
638 if (user_backed_iter(i))
640 iterate_and_advance(i, bytes, base, len, off,
641 copyin(addr + off, base, len),
642 memcpy(addr + off, base, len)
647 EXPORT_SYMBOL(_copy_from_iter);
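/*
 * Sketch of the mirror-image consumer, not from this file: a
 * ->write_iter() style handler draining user data into a kernel buffer.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char buf[64];
	size_t n = copy_from_iter(buf, min(sizeof(buf), iov_iter_count(from)),
				  from);

	if (!n && iov_iter_count(from))
		return -EFAULT;	/* nothing copied although data remained */
	return n;
}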
649 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
651 if (WARN_ON_ONCE(!i->data_source))
654 iterate_and_advance(i, bytes, base, len, off,
655 __copy_from_user_inatomic_nocache(addr + off, base, len),
656 memcpy(addr + off, base, len)
661 EXPORT_SYMBOL(_copy_from_iter_nocache);
663 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
665 * _copy_from_iter_flushcache - write destination through cpu cache
666 * @addr: destination kernel address
667 * @bytes: total transfer length
668 * @i: source iterator
670 * The pmem driver arranges for filesystem-dax to use this facility via
671 * dax_copy_from_iter() for ensuring that writes to persistent memory
672 * are flushed through the CPU cache. It differs from
673 * _copy_from_iter_nocache() in that it guarantees all data is flushed
674 * for all iterator types, while _copy_from_iter_nocache() only attempts
675 * to bypass the cache for the ITER_IOVEC case, and on some archs may use
676 * instructions that strand dirty data in the cache.
678 * Return: number of bytes copied (may be %0)
680 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
682 if (WARN_ON_ONCE(!i->data_source))
685 iterate_and_advance(i, bytes, base, len, off,
686 __copy_from_user_flushcache(addr + off, base, len),
687 memcpy_flushcache(addr + off, base, len)
692 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
695 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
698 size_t v = n + offset;
701 * The general case needs to access the page order in order
702 * to compute the page size.
703 * However, we mostly deal with order-0 pages and thus can
704 * avoid a possible cache line miss for requests that fit all order-0 pages.
707 if (n <= v && v <= PAGE_SIZE)
710 head = compound_head(page);
711 v += (page - head) << PAGE_SHIFT;
713 if (WARN_ON(n > v || v > page_size(head)))
718 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
722 if (!page_copy_sane(page, offset, bytes))
724 if (WARN_ON_ONCE(i->data_source))
726 if (unlikely(iov_iter_is_pipe(i)))
727 return copy_page_to_iter_pipe(page, offset, bytes, i);
728 page += offset / PAGE_SIZE; // first subpage
731 void *kaddr = kmap_local_page(page);
732 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
733 n = _copy_to_iter(kaddr + offset, n, i);
740 if (offset == PAGE_SIZE) {
747 EXPORT_SYMBOL(copy_page_to_iter);
749 size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
754 if (!page_copy_sane(page, offset, bytes))
756 if (WARN_ON_ONCE(i->data_source))
758 if (unlikely(iov_iter_is_pipe(i)))
759 return copy_page_to_iter_pipe(page, offset, bytes, i);
760 page += offset / PAGE_SIZE; // first subpage
763 void *kaddr = kmap_local_page(page);
764 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
766 iterate_and_advance(i, n, base, len, off,
767 copyout_nofault(base, kaddr + offset + off, len),
768 memcpy(base, kaddr + offset + off, len)
776 if (offset == PAGE_SIZE) {
783 EXPORT_SYMBOL(copy_page_to_iter_nofault);
785 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
789 if (!page_copy_sane(page, offset, bytes))
791 page += offset / PAGE_SIZE; // first subpage
794 void *kaddr = kmap_local_page(page);
795 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
796 n = _copy_from_iter(kaddr + offset, n, i);
803 if (offset == PAGE_SIZE) {
810 EXPORT_SYMBOL(copy_page_from_iter);
812 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
814 unsigned int chunk, off;
816 if (unlikely(bytes > i->count))
818 if (unlikely(!bytes))
824 for (size_t n = bytes; n; n -= chunk) {
825 struct page *page = append_pipe(i, n, &off);
830 chunk = min_t(size_t, n, PAGE_SIZE - off);
831 p = kmap_local_page(page);
832 memset(p + off, 0, chunk);
838 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
840 if (unlikely(iov_iter_is_pipe(i)))
841 return pipe_zero(bytes, i);
842 iterate_and_advance(i, bytes, base, len, count,
843 clear_user(base, len),
849 EXPORT_SYMBOL(iov_iter_zero);
851 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
854 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
855 if (!page_copy_sane(page, offset, bytes)) {
856 kunmap_atomic(kaddr);
859 if (WARN_ON_ONCE(!i->data_source)) {
860 kunmap_atomic(kaddr);
863 iterate_and_advance(i, bytes, base, len, off,
864 copyin(p + off, base, len),
865 memcpy(p + off, base, len)
867 kunmap_atomic(kaddr);
870 EXPORT_SYMBOL(copy_page_from_iter_atomic);
872 static void pipe_advance(struct iov_iter *i, size_t size)
874 struct pipe_inode_info *pipe = i->pipe;
875 int off = i->last_offset;
878 pipe_discard_from(pipe, i->start_head); // discard everything
883 struct pipe_buffer *buf = pipe_buf(pipe, i->head);
884 if (off) /* make it relative to the beginning of buffer */
885 size += abs(off) - buf->offset;
886 if (size <= buf->len) {
888 i->last_offset = last_offset(buf);
895 pipe_discard_from(pipe, i->head + 1); // discard everything past this one
898 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
900 const struct bio_vec *bvec, *end;
906 size += i->iov_offset;
908 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
909 if (likely(size < bvec->bv_len))
911 size -= bvec->bv_len;
913 i->iov_offset = size;
914 i->nr_segs -= bvec - i->bvec;
918 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
920 const struct iovec *iov, *end;
926 size += i->iov_offset; // from beginning of current segment
927 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
928 if (likely(size < iov->iov_len))
930 size -= iov->iov_len;
932 i->iov_offset = size;
933 i->nr_segs -= iov - iter_iov(i);
937 void iov_iter_advance(struct iov_iter *i, size_t size)
939 if (unlikely(i->count < size))
941 if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
942 i->iov_offset += size;
944 } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
945 /* iovec and kvec have identical layouts */
946 iov_iter_iovec_advance(i, size);
947 } else if (iov_iter_is_bvec(i)) {
948 iov_iter_bvec_advance(i, size);
949 } else if (iov_iter_is_pipe(i)) {
950 pipe_advance(i, size);
951 } else if (iov_iter_is_discard(i)) {
955 EXPORT_SYMBOL(iov_iter_advance);
957 void iov_iter_revert(struct iov_iter *i, size_t unroll)
961 if (WARN_ON(unroll > MAX_RW_COUNT))
964 if (unlikely(iov_iter_is_pipe(i))) {
965 struct pipe_inode_info *pipe = i->pipe;
966 unsigned int head = pipe->head;
968 while (head > i->start_head) {
969 struct pipe_buffer *b = pipe_buf(pipe, --head);
970 if (unroll < b->len) {
972 i->last_offset = last_offset(b);
977 pipe_buf_release(pipe, b);
984 if (unlikely(iov_iter_is_discard(i)))
986 if (unroll <= i->iov_offset) {
987 i->iov_offset -= unroll;
990 unroll -= i->iov_offset;
991 if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
992 BUG(); /* We should never go beyond the start of the specified
993 * range since we might then be straying into pages that aren't pinned. */
996 } else if (iov_iter_is_bvec(i)) {
997 const struct bio_vec *bvec = i->bvec;
999 size_t n = (--bvec)->bv_len;
1003 i->iov_offset = n - unroll;
1008 } else { /* same logic for iovec and kvec */
1009 const struct iovec *iov = iter_iov(i);
1011 size_t n = (--iov)->iov_len;
1015 i->iov_offset = n - unroll;
1022 EXPORT_SYMBOL(iov_iter_revert);
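/*
 * Sketch (example_* names and the magic value are hypothetical): peel a
 * header off a source iterator and put it back with iov_iter_revert()
 * when validation fails, so the caller still sees an unconsumed iterator.
 */
struct example_hdr { __le32 magic; __le32 len; };

static int example_peek_header(struct iov_iter *from, struct example_hdr *h)
{
	if (!copy_from_iter_full(h, sizeof(*h), from))
		return -EFAULT;	/* the _full variant already reverted the short copy */
	if (le32_to_cpu(h->magic) != 0x4d595448) {
		iov_iter_revert(from, sizeof(*h));
		return -EINVAL;
	}
	return 0;
}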
1025 * Return the count of just the current iov_iter segment.
1027 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1029 if (i->nr_segs > 1) {
1030 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1031 return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
1032 if (iov_iter_is_bvec(i))
1033 return min(i->count, i->bvec->bv_len - i->iov_offset);
1037 EXPORT_SYMBOL(iov_iter_single_seg_count);
1039 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1040 const struct kvec *kvec, unsigned long nr_segs,
1043 WARN_ON(direction & ~(READ | WRITE));
1044 *i = (struct iov_iter){
1045 .iter_type = ITER_KVEC,
1046 .data_source = direction,
1053 EXPORT_SYMBOL(iov_iter_kvec);
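/*
 * Minimal sketch, not from this file: wrap two kernel buffers in kvecs
 * and drain them through one iterator. Copies cross the segment boundary
 * transparently.
 */
static void example_kvec(void)
{
	static char a[8], b[4], out[12];
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, vec, ARRAY_SIZE(vec),
		      sizeof(a) + sizeof(b));
	WARN_ON(copy_from_iter(out, sizeof(out), &iter) != sizeof(out));
}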
1055 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1056 const struct bio_vec *bvec, unsigned long nr_segs,
1059 WARN_ON(direction & ~(READ | WRITE));
1060 *i = (struct iov_iter){
1061 .iter_type = ITER_BVEC,
1062 .data_source = direction,
1069 EXPORT_SYMBOL(iov_iter_bvec);
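/*
 * Sketch (an assumption): presenting a single page as a source iterator,
 * e.g. for a network send path. The bio_vec must stay live for as long
 * as the iterator is in use, so the caller owns it.
 */
static void example_bvec(struct bio_vec *bv, struct page *page,
			 unsigned int len, struct iov_iter *iter)
{
	*bv = (struct bio_vec) {
		.bv_page	= page,
		.bv_len		= len,
		.bv_offset	= 0,
	};
	iov_iter_bvec(iter, ITER_SOURCE, bv, 1, len);
}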
1071 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1072 struct pipe_inode_info *pipe,
1075 BUG_ON(direction != READ);
1076 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1077 *i = (struct iov_iter){
1078 .iter_type = ITER_PIPE,
1079 .data_source = false,
1082 .start_head = pipe->head,
1087 EXPORT_SYMBOL(iov_iter_pipe);
1090 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1091 * @i: The iterator to initialise.
1092 * @direction: The direction of the transfer.
1093 * @xarray: The xarray to access.
1094 * @start: The start file position.
1095 * @count: The size of the I/O buffer in bytes.
1097 * Set up an I/O iterator to either draw data out of the pages attached to an
1098 * inode or to inject data into those pages. The pages *must* be prevented
1099 * from evaporation, either by taking a ref on them or locking them by the caller.
1102 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1103 struct xarray *xarray, loff_t start, size_t count)
1105 BUG_ON(direction & ~1);
1106 *i = (struct iov_iter) {
1107 .iter_type = ITER_XARRAY,
1108 .data_source = direction,
1110 .xarray_start = start,
1115 EXPORT_SYMBOL(iov_iter_xarray);
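/*
 * Sketch (an assumption): pointing an iterator at an inode's pagecache,
 * the typical netfs-style use for receiving data into those pages. Per
 * the comment above, the caller must already hold refs on or locks over
 * every page in the range.
 */
static void example_xarray(struct address_space *mapping, loff_t pos,
			   size_t len, struct iov_iter *iter)
{
	iov_iter_xarray(iter, ITER_DEST, &mapping->i_pages, pos, len);
}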
1118 * iov_iter_discard - Initialise an I/O iterator that discards data
1119 * @i: The iterator to initialise.
1120 * @direction: The direction of the transfer.
1121 * @count: The size of the I/O buffer in bytes.
1123 * Set up an I/O iterator that just discards everything that's written to it.
1124 * It's only available as a READ iterator.
1126 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1128 BUG_ON(direction != READ);
1129 *i = (struct iov_iter){
1130 .iter_type = ITER_DISCARD,
1131 .data_source = false,
1136 EXPORT_SYMBOL(iov_iter_discard);
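/*
 * Sketch (an assumption): draining bytes nobody wants. Anything "copied"
 * into a discard iterator simply disappears, which makes it handy for
 * skipping over part of a datagram.
 */
static int example_skip(struct sk_buff *skb, int off, int len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, ITER_DEST, len);
	return skb_copy_datagram_iter(skb, off, &iter, len);
}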
1138 static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1141 size_t size = i->count;
1142 size_t skip = i->iov_offset;
1145 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1146 const struct iovec *iov = iter_iov(i) + k;
1147 size_t len = iov->iov_len - skip;
1153 if ((unsigned long)(iov->iov_base + skip) & addr_mask)
1163 static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1166 size_t size = i->count;
1167 unsigned skip = i->iov_offset;
1170 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1171 size_t len = i->bvec[k].bv_len - skip;
1177 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1188 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1189 * are aligned to the parameters.
1191 * @i: &struct iov_iter to check
1192 * @addr_mask: bit mask to check against the iov element's addresses
1193 * @len_mask: bit mask to check against the iov element's lengths
1195 * Return: false if any addresses or lengths intersect with the provided masks
1197 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1200 if (likely(iter_is_ubuf(i))) {
1201 if (i->count & len_mask)
1203 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
1208 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1209 return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1211 if (iov_iter_is_bvec(i))
1212 return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1214 if (iov_iter_is_pipe(i)) {
1215 size_t size = i->count;
1217 if (size & len_mask)
1219 if (size && i->last_offset > 0) {
1220 if (i->last_offset & addr_mask)
1227 if (iov_iter_is_xarray(i)) {
1228 if (i->count & len_mask)
1230 if ((i->xarray_start + i->iov_offset) & addr_mask)
1236 EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
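/*
 * Sketch (an assumption): the direct-I/O style check this predicate is
 * built for - every segment must be logical-block aligned in both
 * address and length.
 */
static bool example_dio_aligned(struct iov_iter *iter,
				struct block_device *bdev)
{
	unsigned int mask = bdev_logical_block_size(bdev) - 1;

	return iov_iter_is_aligned(iter, mask, mask);
}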
1238 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1240 unsigned long res = 0;
1241 size_t size = i->count;
1242 size_t skip = i->iov_offset;
1245 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1246 const struct iovec *iov = iter_iov(i) + k;
1247 size_t len = iov->iov_len - skip;
1249 res |= (unsigned long)iov->iov_base + skip;
1261 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1264 size_t size = i->count;
1265 unsigned skip = i->iov_offset;
1268 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1269 size_t len = i->bvec[k].bv_len - skip;
1270 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1281 unsigned long iov_iter_alignment(const struct iov_iter *i)
1283 if (likely(iter_is_ubuf(i))) {
1284 size_t size = i->count;
1286 return ((unsigned long)i->ubuf + i->iov_offset) | size;
1290 /* iovec and kvec have identical layouts */
1291 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1292 return iov_iter_alignment_iovec(i);
1294 if (iov_iter_is_bvec(i))
1295 return iov_iter_alignment_bvec(i);
1297 if (iov_iter_is_pipe(i)) {
1298 size_t size = i->count;
1300 if (size && i->last_offset > 0)
1301 return size | i->last_offset;
1305 if (iov_iter_is_xarray(i))
1306 return (i->xarray_start + i->iov_offset) | i->count;
1310 EXPORT_SYMBOL(iov_iter_alignment);
1312 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1314 unsigned long res = 0;
1315 unsigned long v = 0;
1316 size_t size = i->count;
1319 if (iter_is_ubuf(i))
1322 if (WARN_ON(!iter_is_iovec(i)))
1325 for (k = 0; k < i->nr_segs; k++) {
1326 const struct iovec *iov = iter_iov(i) + k;
1328 unsigned long base = (unsigned long)iov->iov_base;
1329 if (v) // if not the first one
1330 res |= base | v; // this start | previous end
1331 v = base + iov->iov_len;
1332 if (size <= iov->iov_len)
1334 size -= iov->iov_len;
1339 EXPORT_SYMBOL(iov_iter_gap_alignment);
1341 static int want_pages_array(struct page ***res, size_t size,
1342 size_t start, unsigned int maxpages)
1344 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
1346 if (count > maxpages)
1348 WARN_ON(!count); // caller should've prevented that
1350 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
1357 static ssize_t pipe_get_pages(struct iov_iter *i,
1358 struct page ***pages, size_t maxsize, unsigned maxpages,
1361 unsigned int npages, count, off, chunk;
1368 *start = off = pipe_npages(i, &npages);
1371 count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
1375 for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
1376 struct page *page = append_pipe(i, left, &off);
1379 chunk = min_t(size_t, left, PAGE_SIZE - off);
1380 get_page(*p++ = page);
1384 return maxsize - left;
1387 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1388 pgoff_t index, unsigned int nr_pages)
1390 XA_STATE(xas, xa, index);
1392 unsigned int ret = 0;
1395 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1396 if (xas_retry(&xas, page))
1399 /* Has the page moved or been split? */
1400 if (unlikely(page != xas_reload(&xas))) {
1405 pages[ret] = find_subpage(page, xas.xa_index);
1406 get_page(pages[ret]);
1407 if (++ret == nr_pages)
1414 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1415 struct page ***pages, size_t maxsize,
1416 unsigned maxpages, size_t *_start_offset)
1418 unsigned nr, offset, count;
1422 pos = i->xarray_start + i->iov_offset;
1423 index = pos >> PAGE_SHIFT;
1424 offset = pos & ~PAGE_MASK;
1425 *_start_offset = offset;
1427 count = want_pages_array(pages, maxsize, offset, maxpages);
1430 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1434 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1435 i->iov_offset += maxsize;
1436 i->count -= maxsize;
1440 /* must be done on a non-empty ITER_UBUF or ITER_IOVEC iterator */
1441 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1446 if (iter_is_ubuf(i))
1447 return (unsigned long)i->ubuf + i->iov_offset;
1449 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1450 const struct iovec *iov = iter_iov(i) + k;
1451 size_t len = iov->iov_len - skip;
1457 return (unsigned long)iov->iov_base + skip;
1459 BUG(); // if it had been empty, we wouldn't get called
1462 /* must be done on a non-empty ITER_BVEC iterator */
1463 static struct page *first_bvec_segment(const struct iov_iter *i,
1464 size_t *size, size_t *start)
1467 size_t skip = i->iov_offset, len;
1469 len = i->bvec->bv_len - skip;
1472 skip += i->bvec->bv_offset;
1473 page = i->bvec->bv_page + skip / PAGE_SIZE;
1474 *start = skip % PAGE_SIZE;
1478 static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1479 struct page ***pages, size_t maxsize,
1480 unsigned int maxpages, size_t *start,
1481 iov_iter_extraction_t extraction_flags)
1483 unsigned int n, gup_flags = 0;
1485 if (maxsize > i->count)
1489 if (maxsize > MAX_RW_COUNT)
1490 maxsize = MAX_RW_COUNT;
1491 if (extraction_flags & ITER_ALLOW_P2PDMA)
1492 gup_flags |= FOLL_PCI_P2PDMA;
1494 if (likely(user_backed_iter(i))) {
1498 if (iov_iter_rw(i) != WRITE)
1499 gup_flags |= FOLL_WRITE;
1501 gup_flags |= FOLL_NOFAULT;
1503 addr = first_iovec_segment(i, &maxsize);
1504 *start = addr % PAGE_SIZE;
1506 n = want_pages_array(pages, maxsize, *start, maxpages);
1509 res = get_user_pages_fast(addr, n, gup_flags, *pages);
1510 if (unlikely(res <= 0))
1512 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1513 iov_iter_advance(i, maxsize);
1516 if (iov_iter_is_bvec(i)) {
1520 page = first_bvec_segment(i, &maxsize, start);
1521 n = want_pages_array(pages, maxsize, *start, maxpages);
1525 for (int k = 0; k < n; k++)
1526 get_page(p[k] = page + k);
1527 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1528 i->count -= maxsize;
1529 i->iov_offset += maxsize;
1530 if (i->iov_offset == i->bvec->bv_len) {
1537 if (iov_iter_is_pipe(i))
1538 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1539 if (iov_iter_is_xarray(i))
1540 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1544 ssize_t iov_iter_get_pages(struct iov_iter *i,
1545 struct page **pages, size_t maxsize, unsigned maxpages,
1546 size_t *start, iov_iter_extraction_t extraction_flags)
1552 return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
1553 start, extraction_flags);
1555 EXPORT_SYMBOL_GPL(iov_iter_get_pages);
1557 ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1558 size_t maxsize, unsigned maxpages, size_t *start)
1560 return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
1562 EXPORT_SYMBOL(iov_iter_get_pages2);
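/*
 * Sketch, not from this file: taking page references for a short-lived
 * operation. The return value is in bytes, the iterator has already been
 * advanced past them, and every returned page holds a reference that
 * must be dropped.
 */
static ssize_t example_get_pages(struct iov_iter *iter)
{
	struct page *pages[16];
	size_t offset;
	ssize_t n;
	int k, npages;

	n = iov_iter_get_pages2(iter, pages, 16 * PAGE_SIZE, 16, &offset);
	if (n <= 0)
		return n;
	npages = DIV_ROUND_UP(offset + n, PAGE_SIZE);
	/* ... use the pages ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	return n;
}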
1564 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1565 struct page ***pages, size_t maxsize,
1566 size_t *start, iov_iter_extraction_t extraction_flags)
1572 len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
1580 EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
1582 ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1583 struct page ***pages, size_t maxsize, size_t *start)
1585 return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
1587 EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
1589 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1594 if (WARN_ON_ONCE(!i->data_source))
1597 iterate_and_advance(i, bytes, base, len, off, ({
1598 next = csum_and_copy_from_user(base, addr + off, len);
1599 sum = csum_block_add(sum, next, off);
1602 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1608 EXPORT_SYMBOL(csum_and_copy_from_iter);
1610 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1613 struct csum_state *csstate = _csstate;
1616 if (WARN_ON_ONCE(i->data_source))
1618 if (unlikely(iov_iter_is_discard(i))) {
1619 // can't use csum_and_memcpy() for that one - data is not copied
1620 csstate->csum = csum_block_add(csstate->csum,
1621 csum_partial(addr, bytes, 0),
1623 csstate->off += bytes;
1627 sum = csum_shift(csstate->csum, csstate->off);
1628 if (unlikely(iov_iter_is_pipe(i)))
1629 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1630 else iterate_and_advance(i, bytes, base, len, off, ({
1631 next = csum_and_copy_to_user(addr + off, base, len);
1632 sum = csum_block_add(sum, next, off);
1635 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1638 csstate->csum = csum_shift(sum, csstate->off);
1639 csstate->off += bytes;
1642 EXPORT_SYMBOL(csum_and_copy_to_iter);
1644 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1647 #ifdef CONFIG_CRYPTO_HASH
1648 struct ahash_request *hash = hashp;
1649 struct scatterlist sg;
1652 copied = copy_to_iter(addr, bytes, i);
1653 sg_init_one(&sg, addr, copied);
1654 ahash_request_set_crypt(hash, &sg, NULL, copied);
1655 crypto_ahash_update(hash);
1661 EXPORT_SYMBOL(hash_and_copy_to_iter);
1663 static int iov_npages(const struct iov_iter *i, int maxpages)
1665 size_t skip = i->iov_offset, size = i->count;
1666 const struct iovec *p;
1669 for (p = iter_iov(i); size; skip = 0, p++) {
1670 unsigned offs = offset_in_page(p->iov_base + skip);
1671 size_t len = min(p->iov_len - skip, size);
1675 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1676 if (unlikely(npages > maxpages))
1683 static int bvec_npages(const struct iov_iter *i, int maxpages)
1685 size_t skip = i->iov_offset, size = i->count;
1686 const struct bio_vec *p;
1689 for (p = i->bvec; size; skip = 0, p++) {
1690 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1691 size_t len = min(p->bv_len - skip, size);
1694 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1695 if (unlikely(npages > maxpages))
1701 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1703 if (unlikely(!i->count))
1705 if (likely(iter_is_ubuf(i))) {
1706 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1707 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1708 return min(npages, maxpages);
1710 /* iovec and kvec have identical layouts */
1711 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1712 return iov_npages(i, maxpages);
1713 if (iov_iter_is_bvec(i))
1714 return bvec_npages(i, maxpages);
1715 if (iov_iter_is_pipe(i)) {
1721 pipe_npages(i, &npages);
1722 return min(npages, maxpages);
1724 if (iov_iter_is_xarray(i)) {
1725 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1726 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1727 return min(npages, maxpages);
1731 EXPORT_SYMBOL(iov_iter_npages);
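/*
 * Sketch (an assumption): the classic block-layer use - size a bio by
 * how many pages the iterator will span, capped at BIO_MAX_VECS.
 */
static struct bio *example_bio_for_iter(struct block_device *bdev,
					struct iov_iter *iter)
{
	return bio_alloc(bdev, iov_iter_npages(iter, BIO_MAX_VECS),
			 REQ_OP_READ, GFP_KERNEL);
}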
1733 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1736 if (unlikely(iov_iter_is_pipe(new))) {
1740 if (iov_iter_is_bvec(new))
1741 return new->bvec = kmemdup(new->bvec,
1742 new->nr_segs * sizeof(struct bio_vec),
1744 else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1745 /* iovec and kvec have identical layout */
1746 return new->__iov = kmemdup(new->__iov,
1747 new->nr_segs * sizeof(struct iovec),
1751 EXPORT_SYMBOL(dup_iter);
1753 static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
1754 const struct iovec __user *uvec, unsigned long nr_segs)
1756 const struct compat_iovec __user *uiov =
1757 (const struct compat_iovec __user *)uvec;
1758 int ret = -EFAULT, i;
1760 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1763 for (i = 0; i < nr_segs; i++) {
1767 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1768 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1770 /* check for compat_size_t not fitting in compat_ssize_t .. */
1775 iov[i].iov_base = compat_ptr(buf);
1776 iov[i].iov_len = len;
1785 static int copy_iovec_from_user(struct iovec *iov,
1786 const struct iovec __user *uiov, unsigned long nr_segs)
1790 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1797 unsafe_get_user(len, &uiov->iov_len, uaccess_end);
1798 unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
1800 /* check for size_t not fitting in ssize_t .. */
1801 if (unlikely(len < 0)) {
1805 iov->iov_base = buf;
1809 } while (--nr_segs);
1817 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1818 unsigned long nr_segs, unsigned long fast_segs,
1819 struct iovec *fast_iov, bool compat)
1821 struct iovec *iov = fast_iov;
1825 * SuS says "The readv() function *may* fail if the iovcnt argument was
1826 * less than or equal to 0, or greater than {IOV_MAX}. Linux has
1827 * traditionally returned zero for zero segments, so...
1831 if (nr_segs > UIO_MAXIOV)
1832 return ERR_PTR(-EINVAL);
1833 if (nr_segs > fast_segs) {
1834 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1836 return ERR_PTR(-ENOMEM);
1839 if (unlikely(compat))
1840 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1842 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1844 if (iov != fast_iov)
1846 return ERR_PTR(ret);
1853 * Single segment iovec supplied by the user, import it as ITER_UBUF.
1855 static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
1856 struct iovec **iovp, struct iov_iter *i,
1859 struct iovec *iov = *iovp;
1863 ret = copy_compat_iovec_from_user(iov, uvec, 1);
1865 ret = copy_iovec_from_user(iov, uvec, 1);
1869 ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
1876 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1877 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1878 struct iov_iter *i, bool compat)
1880 ssize_t total_len = 0;
1885 return __import_iovec_ubuf(type, uvec, iovp, i, compat);
1887 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1890 return PTR_ERR(iov);
1894 * According to the Single Unix Specification we should return EINVAL if
1895 * an element length is < 0 when cast to ssize_t or if the total length
1896 * would overflow the ssize_t return value of the system call.
1898 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the overflow case.
1901 for (seg = 0; seg < nr_segs; seg++) {
1902 ssize_t len = (ssize_t)iov[seg].iov_len;
1904 if (!access_ok(iov[seg].iov_base, len)) {
1911 if (len > MAX_RW_COUNT - total_len) {
1912 len = MAX_RW_COUNT - total_len;
1913 iov[seg].iov_len = len;
1918 iov_iter_init(i, type, iov, nr_segs, total_len);
1927 * import_iovec() - Copy an array of &struct iovec from userspace
1928 * into the kernel, check that it is valid, and initialize a new
1929 * &struct iov_iter iterator to access it.
1931 * @type: One of %READ or %WRITE.
1932 * @uvec: Pointer to the userspace array.
1933 * @nr_segs: Number of elements in userspace array.
1934 * @fast_segs: Number of elements in *@iovp.
1935 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1936 * on-stack) kernel array.
1937 * @i: Pointer to iterator that will be initialized on success.
1939 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1940 * then this function places %NULL in *@iovp on return. Otherwise, a new
1941 * array will be allocated and the result placed in *@iovp. This means that
1942 * the caller may call kfree() on *@iovp regardless of whether the small
1943 * on-stack array was used or not (and regardless of whether this function
1944 * returns an error or not).
1946 * Return: Negative error code on error, bytes imported on success
1948 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1949 unsigned nr_segs, unsigned fast_segs,
1950 struct iovec **iovp, struct iov_iter *i)
1952 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1953 in_compat_syscall());
1955 EXPORT_SYMBOL(import_iovec);
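/*
 * Sketch of the standard calling convention (example_consume() is
 * hypothetical): a small on-stack array is offered, import_iovec()
 * replaces it with a kmalloc'ed one only when needed, and kfree() is
 * safe either way because *iovp is NULLed when the stack array is used.
 */
static ssize_t example_readv(const struct iovec __user *uvec,
			     unsigned long nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;
	ret = example_consume(iter);	/* hypothetical */
	kfree(iov);
	return ret;
}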
1957 int import_single_range(int rw, void __user *buf, size_t len,
1958 struct iovec *iov, struct iov_iter *i)
1960 if (len > MAX_RW_COUNT)
1962 if (unlikely(!access_ok(buf, len)))
1965 iov_iter_ubuf(i, rw, buf, len);
1968 EXPORT_SYMBOL(import_single_range);
1970 int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
1972 if (len > MAX_RW_COUNT)
1974 if (unlikely(!access_ok(buf, len)))
1977 iov_iter_ubuf(i, rw, buf, len);
1982 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1983 * iov_iter_save_state() was called.
1985 * @i: &struct iov_iter to restore
1986 * @state: state to restore from
1988 * Used after iov_iter_save_state() to restore @i, if operations may have advanced it.
1991 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
1993 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1995 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1996 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
1998 i->iov_offset = state->iov_offset;
1999 i->count = state->count;
2000 if (iter_is_ubuf(i))
2003 * For the *vec iters, nr_segs + iov is constant - if we increment
2004 * the vec, then we also decrement the nr_segs count. Hence we don't
2005 * need to track both of these; just one is enough, and we can deduce
2006 * the other from it. ITER_KVEC and ITER_IOVEC are the same struct
2007 * size, so we can just increment the iov pointer as they are unionized.
2008 * ITER_BVEC _may_ be the same size on some archs, but on others it is
2009 * not. Be safe and handle it separately.
2011 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
2012 if (iov_iter_is_bvec(i))
2013 i->bvec -= state->nr_segs - i->nr_segs;
2015 i->__iov -= state->nr_segs - i->nr_segs;
2016 i->nr_segs = state->nr_segs;
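/*
 * Sketch (example_* names hypothetical): snapshot the iterator before an
 * attempt that may consume it, and replay from the snapshot on -EAGAIN.
 */
static ssize_t example_retry(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = example_try_nowait(iter);		/* may partially consume @iter */
	if (ret == -EAGAIN) {
		iov_iter_restore(iter, &state);
		ret = example_try_blocking(iter);
	}
	return ret;
}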
2020 * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
2021 * get references on the pages, nor does it get a pin on them.
2023 static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
2024 struct page ***pages, size_t maxsize,
2025 unsigned int maxpages,
2026 iov_iter_extraction_t extraction_flags,
2029 struct page *page, **p;
2030 unsigned int nr = 0, offset;
2031 loff_t pos = i->xarray_start + i->iov_offset;
2032 pgoff_t index = pos >> PAGE_SHIFT;
2033 XA_STATE(xas, i->xarray, index);
2035 offset = pos & ~PAGE_MASK;
2038 maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2044 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
2045 if (xas_retry(&xas, page))
2048 /* Has the page moved or been split? */
2049 if (unlikely(page != xas_reload(&xas))) {
2054 p[nr++] = find_subpage(page, xas.xa_index);
2060 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
2061 iov_iter_advance(i, maxsize);
2066 * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
2067 * not get references on the pages, nor does it get a pin on them.
2069 static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
2070 struct page ***pages, size_t maxsize,
2071 unsigned int maxpages,
2072 iov_iter_extraction_t extraction_flags,
2075 struct page **p, *page;
2076 size_t skip = i->iov_offset, offset;
2080 if (i->nr_segs == 0)
2082 maxsize = min(maxsize, i->bvec->bv_len - skip);
2091 skip += i->bvec->bv_offset;
2092 page = i->bvec->bv_page + skip / PAGE_SIZE;
2093 offset = skip % PAGE_SIZE;
2096 maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2100 for (k = 0; k < maxpages; k++)
2103 maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2104 iov_iter_advance(i, maxsize);
2109 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
2110 * This does not get references on the pages, nor does it get a pin on them.
2112 static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
2113 struct page ***pages, size_t maxsize,
2114 unsigned int maxpages,
2115 iov_iter_extraction_t extraction_flags,
2118 struct page **p, *page;
2120 size_t skip = i->iov_offset, offset, len;
2124 if (i->nr_segs == 0)
2126 maxsize = min(maxsize, i->kvec->iov_len - skip);
2135 kaddr = i->kvec->iov_base + skip;
2136 offset = (unsigned long)kaddr & ~PAGE_MASK;
2139 maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2145 len = offset + maxsize;
2146 for (k = 0; k < maxpages; k++) {
2147 size_t seg = min_t(size_t, len, PAGE_SIZE);
2149 if (is_vmalloc_or_module_addr(kaddr))
2150 page = vmalloc_to_page(kaddr);
2152 page = virt_to_page(kaddr);
2159 maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2160 iov_iter_advance(i, maxsize);
2165 * Extract a list of contiguous pages from a user iterator and get a pin on
2166 * each of them. This should only be used if the iterator is user-backed (ITER_IOVEC/ITER_UBUF).
2169 * It does not get refs on the pages, but the pages must be unpinned by the
2170 * caller once the transfer is complete.
2172 * This is safe to be used where background IO/DMA *is* going to be modifying
2173 * the buffer; using a pin rather than a ref forces fork() to give the
2174 * child a copy of the page.
2176 static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
2177 struct page ***pages,
2179 unsigned int maxpages,
2180 iov_iter_extraction_t extraction_flags,
2184 unsigned int gup_flags = 0;
2188 if (i->data_source == ITER_DEST)
2189 gup_flags |= FOLL_WRITE;
2190 if (extraction_flags & ITER_ALLOW_P2PDMA)
2191 gup_flags |= FOLL_PCI_P2PDMA;
2193 gup_flags |= FOLL_NOFAULT;
2195 addr = first_iovec_segment(i, &maxsize);
2196 *offset0 = offset = addr % PAGE_SIZE;
2198 maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2201 res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
2202 if (unlikely(res <= 0))
2204 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
2205 iov_iter_advance(i, maxsize);
2210 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
2211 * @i: The iterator to extract from
2212 * @pages: Where to return the list of pages
2213 * @maxsize: The maximum amount of iterator to extract
2214 * @maxpages: The maximum size of the list of pages
2215 * @extraction_flags: Flags to qualify request
2216 * @offset0: Where to return the starting offset into (*@pages)[0]
2218 * Extract a list of contiguous pages from the current point of the iterator,
2219 * advancing the iterator. The maximum number of pages and the maximum amount
2220 * of page contents can be set.
2222 * If *@pages is NULL, a page list will be allocated to the required size and
2223 * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
2224 * that the caller allocated a page list at least @maxpages in size and this
2225 * will be filled in.
2227 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
2228 * be allowed on the pages extracted.
2230 * The iov_iter_extract_will_pin() function can be used to query how cleanup
2231 * should be performed.
2233 * Extra refs or pins on the pages may be obtained as follows:
2235 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
2236 * added to the pages, but refs will not be taken.
2237 * iov_iter_extract_will_pin() will return true.
2239 * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
2240 * merely listed; no extra refs or pins are obtained.
2241 * iov_iter_extract_will_pin() will return false.
2245 * (*) Use with ITER_DISCARD is not supported as that has no content.
2247 * On success, the function sets *@pages to the new pagelist, if allocated, and
2248 * sets *offset0 to the offset into the first page.
2250 * It may also return -ENOMEM and -EFAULT.
2252 ssize_t iov_iter_extract_pages(struct iov_iter *i,
2253 struct page ***pages,
2255 unsigned int maxpages,
2256 iov_iter_extraction_t extraction_flags,
2259 maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
2263 if (likely(user_backed_iter(i)))
2264 return iov_iter_extract_user_pages(i, pages, maxsize,
2265 maxpages, extraction_flags,
2267 if (iov_iter_is_kvec(i))
2268 return iov_iter_extract_kvec_pages(i, pages, maxsize,
2269 maxpages, extraction_flags,
2271 if (iov_iter_is_bvec(i))
2272 return iov_iter_extract_bvec_pages(i, pages, maxsize,
2273 maxpages, extraction_flags,
2275 if (iov_iter_is_xarray(i))
2276 return iov_iter_extract_xarray_pages(i, pages, maxsize,
2277 maxpages, extraction_flags,
2281 EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
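/*
 * Sketch (an assumption): the matching cleanup. Whether anything must be
 * released depends on the iterator type, which is exactly what
 * iov_iter_extract_will_pin() reports.
 */
static void example_release_extracted(struct iov_iter *iter,
				      struct page **pages, long nr_pages)
{
	if (iov_iter_extract_will_pin(iter))
		unpin_user_pages(pages, nr_pages);
	/* kvec/bvec/xarray pages were only listed; nothing to undo */
}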