iov_iter: separate direction from flavour
[linux-block.git] / lib / iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		(void)(STEP);				\
	}						\
}

#define iterate_xarray(i, n, __v, skip, STEP) {		\
	struct page *head = NULL;			\
	size_t wanted = n, seg, offset;			\
	loff_t start = i->xarray_start + skip;		\
	pgoff_t index = start >> PAGE_SHIFT;		\
	int j;						\
							\
	XA_STATE(xas, i->xarray, index);		\
							\
	rcu_read_lock();				\
	xas_for_each(&xas, head, ULONG_MAX) {		\
		if (xas_retry(&xas, head))		\
			continue;			\
		if (WARN_ON(xa_is_value(head)))		\
			break;				\
		if (WARN_ON(PageHuge(head)))		\
			break;				\
		for (j = (head->index < index) ? index - head->index : 0; \
		     j < thp_nr_pages(head); j++) {	\
			__v.bv_page = head + j;		\
			offset = (i->xarray_start + skip) & ~PAGE_MASK;	\
			seg = PAGE_SIZE - offset;	\
			__v.bv_offset = offset;		\
			__v.bv_len = min(n, seg);	\
			(void)(STEP);			\
			n -= __v.bv_len;		\
			skip += __v.bv_len;		\
			if (n == 0)			\
				break;			\
		}					\
		if (n == 0)				\
			break;				\
	}						\
	rcu_read_unlock();				\
	n = wanted - n;					\
}

#define iterate_all_kinds(i, n, v, I, B, K, X) {	\
	if (likely(n)) {				\
		size_t skip = i->iov_offset;		\
		if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		} else if (iov_iter_is_bvec(i)) {	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (iov_iter_is_kvec(i)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (iov_iter_is_xarray(i)) {	\
			struct bio_vec v;		\
			iterate_xarray(i, n, v, skip, (X));	\
		}					\
	}						\
}

#define iterate_and_advance(i, n, v, I, B, K, X) {	\
	if (unlikely(i->count < n))			\
		n = i->count;				\
	if (i->count) {					\
		size_t skip = i->iov_offset;		\
		if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {	\
				iov++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= iov - i->iov;	\
			i->iov = iov;			\
		} else if (iov_iter_is_bvec(i)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (iov_iter_is_kvec(i)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {	\
				kvec++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= kvec - i->kvec;	\
			i->kvec = kvec;			\
		} else if (iov_iter_is_xarray(i)) {	\
			struct bio_vec v;		\
			iterate_xarray(i, n, v, skip, (X))	\
		}					\
		i->count -= n;				\
		i->iov_offset = skip;			\
	}						\
}

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (iter_is_iovec(i)) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
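
/*
 * Illustrative sketch (not part of this file): buffered-write paths
 * typically fault the user pages in before entering a section where page
 * faults are disabled (cf. generic_perform_write()):
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 *	... kmap_atomic() + iov_iter_copy_from_user_atomic() ...
 *
 * Only ITER_IOVEC can fault; all other flavours return 0 immediately.
 */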

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	WARN_ON_ONCE(uaccess_kernel());
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
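
/*
 * Usage sketch (hypothetical caller, not part of this file): wrapping a
 * single user buffer that a read(2)-style path will copy data into.
 * READ means userspace is the destination, WRITE means it is the source:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */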

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			rcu_read_unlock();
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
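
/*
 * Call-pattern sketch for _copy_mc_to_iter() (hypothetical caller; the real
 * consumer is the pmem driver via dax_copy_to_iter()): a short return means
 * a machine check was consumed mid-copy, so the caller reports the partial
 * count rather than retrying the poisoned region:
 *
 *	size_t copied = _copy_mc_to_iter(kaddr, len, iter);
 *	if (copied < len)
 *		return copied ? copied : -EIO;
 */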

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
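
/*
 * Usage sketch (assumed caller, modelled on the dax/pmem path): writes
 * destined for persistent memory go through the flushcache variant so no
 * dirty data is left stranded in the CPU cache after the copy:
 *
 *	size_t n = _copy_from_iter_flushcache(pmem_addr, bytes, iter);
 */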

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	}
	WARN_ON(1);
	return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
		return;
	}
	if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);
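
/*
 * Usage sketch (hypothetical caller): ITER_BVEC lets the same copy helpers
 * run over kernel pages, e.g. the pages backing a bio. WRITE marks the
 * iterator as the data source:
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_len = len, .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 *	n = copy_from_iter(buf, len, &iter);
 */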

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages. The pages *must* be prevented
 * from evaporation, either by the caller taking a ref on them or locking
 * them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
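
/*
 * Usage sketch (hypothetical caller, assuming the pages are already pinned
 * or locked): a network filesystem can point an iterator straight at its
 * pagecache to receive data into the cached pages:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
 *	... hand &iter to the transport ...
 */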

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
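
/*
 * Usage sketch (hypothetical caller): skipping over unwanted data without
 * supplying real buffers; the copy helpers just consume the count:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, bytes_to_skip);
 *	n = copy_to_iter(src, bytes_to_skip, &iter);	// data is dropped
 */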

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	if (unlikely(iov_iter_is_xarray(i)))
		return (i->xarray_start + i->iov_offset) | i->count;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len,
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0))
	);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}),
	0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}), 0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	const char *from = addr;
	__wsum sum, next;
	size_t off;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

	sum = csstate->csum;
	off = csstate->off;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				      (from += v.iov_len) - v.iov_len,
				      v.iov_len, sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else if (unlikely(iov_iter_is_xarray(i))) {
		unsigned offset;

		offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;

		npages = 1;
		if (size > PAGE_SIZE - offset) {
			size -= PAGE_SIZE - offset;
			npages += size >> PAGE_SHIFT;
			size &= ~PAGE_MASK;
			if (size)
				npages++;
		}
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	}),
	0
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
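
/*
 * Usage sketch (hypothetical caller): direct-IO paths size their page
 * array from the iterator before pinning anything (the cap shown here,
 * BIO_MAX_VECS, is just an example limit):
 *
 *	int npages = iov_iter_npages(iter, BIO_MAX_VECS);
 *	... allocate that many slots, then iov_iter_get_pages() ...
 */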

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
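
/*
 * Usage sketch of the common calling convention (hypothetical caller):
 * readv/writev-style syscalls import the user vector once and then work
 * purely on the iterator:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use &iter ...
 *	kfree(iov);	// safe whether or not the stack array was used
 */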
2009
bc917be8
AV
2010int import_single_range(int rw, void __user *buf, size_t len,
2011 struct iovec *iov, struct iov_iter *i)
2012{
2013 if (len > MAX_RW_COUNT)
2014 len = MAX_RW_COUNT;
96d4f267 2015 if (unlikely(!access_ok(buf, len)))
bc917be8
AV
2016 return -EFAULT;
2017
2018 iov->iov_base = buf;
2019 iov->iov_len = len;
2020 iov_iter_init(i, rw, iov, 1, len);
2021 return 0;
2022}
e1267585 2023EXPORT_SYMBOL(import_single_range);