// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_iovec(i))) {			\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

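/*
 * Usage sketch (not part of this file): buffered-write paths typically pair
 * this with a nofault copy and a retry, roughly:
 *
 *	again:
 *		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
 *			return -EFAULT;		// nothing could be faulted in
 *		...
 *		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *		if (unlikely(copied == 0))
 *			goto again;		// source got unmapped, refault
 *
 * This mirrors the generic_perform_write() pattern; locking and the
 * iov_iter_revert() bookkeeping are elided.
 */
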
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

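/*
 * Usage sketch (assumed caller pattern, not from this file): read-side
 * callers that copy *to* user memory with page faults disabled can prime
 * the destination before retrying the nofault copy:
 *
 *	if (iov_iter_rw(i) == READ &&
 *	    fault_in_iov_iter_writeable(i, len) == len)
 *		return -EFAULT;
 *	// then redo the nofault copy of @len bytes
 */
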
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

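/*
 * Example (hypothetical caller): wrap a single user buffer for a write,
 * i.e. data flowing from userspace into the kernel:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, WRITE, &iov, 1, len);
 *	// copy_from_iter(kbuf, len, &iter) now pulls from ubuf
 *
 * Most callers arrive here via import_iovec()/import_single_range() below
 * rather than calling iov_iter_init() directly.
 */
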
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int i_head;
	size_t r;

	if (!sanity(i))
		return 0;

	bytes = push_pipe(i, bytes, &i_head, &r);
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		bytes -= chunk;
		off += chunk;
		r = 0;
		i_head++;
	}
	*sump = sum;
	i->count -= off;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

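/*
 * Usage sketch (hypothetical character driver): a ->read_iter() method can
 * hand a kernel buffer to whatever the iterator describes - user memory,
 * a kvec, a bvec or a pipe - without caring which:
 *
 *	static ssize_t foo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t n = copy_to_iter(foo->buf, foo->len, to);
 *
 *		return n ? n : -EFAULT;	// assumes foo->len > 0
 *	}
 */
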
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	n = push_pipe(i, bytes, &i_head, &off);
	while (n) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		unsigned long rem;
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		xfer += chunk;
		if (rem)
			break;
		n -= chunk;
		off = 0;
		i_head++;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	}
	WARN_ON(1);
	return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

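/*
 * Usage sketch (assumed caller, cf. the buffered-read path): a filesystem
 * feeds successive page-cache pages into the caller-supplied iterator:
 *
 *	copied = copy_page_to_iter(page, offset_in_page(pos), n, iter);
 *	pos += copied;
 *	if (copied < n)
 *		return -EFAULT;	// or fault pages in and retry
 */
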
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

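/*
 * Example (hypothetical): a caller that consumed data speculatively can
 * back out the unused tail, restoring the segment cursor exactly:
 *
 *	size_t want = 512, got;
 *
 *	got = copy_from_iter(buf, want, iter);	// advances the iterator
 *	if (got && !header_valid(buf))		// header_valid(): made-up check
 *		iov_iter_revert(iter, got);	// put the bytes "back"
 */
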
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

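/*
 * Example (hypothetical): reading file data into a kernel buffer, the way
 * kernel_read()-style helpers do:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	ret = vfs_iter_read(file, &iter, &pos, 0);
 */
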
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

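/*
 * Example (hypothetical): sending one page over a socket without mapping
 * it first, a pattern common in networking callers:
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_len = len, .bv_offset = 0 };
 *	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 *
 *	iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
 *	ret = sock_sendmsg(sock, &msg);
 */
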
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

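/*
 * Usage sketch (assumed caller, netfs-style): iterate over pages a mapping
 * already holds for a given file range:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	// pages covering [pos, pos + len) must be locked or pinned
 *	// by the caller for the lifetime of the iterator
 */
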
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

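/*
 * Example (hypothetical): draining @len unread bytes from a socket by
 * "reading" them into nowhere:
 *
 *	struct msghdr msg = {};
 *
 *	iov_iter_discard(&msg.msg_iter, READ, len);
 *	ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
 */
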
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

/* must be done on non-empty ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i,
					 size_t *size, size_t *start,
					 size_t maxsize, unsigned maxpages)
{
	size_t skip;
	long k;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (len > maxsize)
			len = maxsize;
		len += (*start = addr % PAGE_SIZE);
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		*size = len;
		return addr & PAGE_MASK;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize, unsigned maxpages)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	len += (*start = skip % PAGE_SIZE);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		while (n--)
			get_page(*pages++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		while (n--)
			get_page(*p++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

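/*
 * Example (hypothetical): the classic readv()-style pattern, including the
 * unconditional kfree() that the comment above licenses:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(&iter);	// made-up consumer
 *	kfree(iov);			// safe: NULL when iovstack was used
 */
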
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
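
/*
 * Example (hypothetical): retrying an operation that may partially consume
 * the iterator, in the style of io_uring's handling of ->read_iter():
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_io(iter);			// made-up worker, may advance iter
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);	// rewind for the retry
 *		...
 *	}
 */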