// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t offset = i->iov_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset && offset < PAGE_SIZE) {
		// some space in the last buffer; can we add to it?
		buf = pipe_buf(pipe, pipe->head - 1);
		if (allocated(buf)) {
			size = min_t(size_t, size, PAGE_SIZE - offset);
			buf->len += size;
			i->iov_offset += size;
			i->count -= size;
			*off = offset;
			return buf->page;
		}
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->iov_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->iov_offset == offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->iov_offset = offset + bytes;
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
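
/*
 * Usage sketch (illustrative only, not part of this file; follows the
 * generic_perform_write() pattern): callers typically fault the user
 * pages in first, then copy with page faults disabled, retrying while
 * the atomic copy comes up short:
 *
 *	again:
 *		if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *			return -EFAULT;	// nothing could be faulted in
 *		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *		if (unlikely(!copied))
 *			goto again;	// pages got reclaimed; fault in again
 */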

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
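
/*
 * Usage sketch (illustrative only): wrapping a single user buffer for a
 * write(2)-style operation, where the iterator is the data source; see
 * also import_single_range() below, which does exactly this:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, WRITE, &iov, 1, len);
 */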

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(pipe_buf(i->pipe, iter_head)) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes copied so far.
 *
 * The main differences from a typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types.  _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	} else {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
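
/*
 * Usage sketch (illustrative only): a ->read_iter()-style loop handing
 * file data to whatever the iterator describes (user memory, a kvec,
 * a bvec or a pipe); a short return means a fault or a full pipe:
 *
 *	size_t n = copy_page_to_iter(page, offset, chunk, iter);
 *
 *	copied += n;
 *	if (n < chunk)
 *		break;	// return 'copied', or -EFAULT if it is 0
 */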

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (page_copy_sane(page, offset, bytes)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int off = i->iov_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += off - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->iov_offset = buf->offset + size;
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
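
/*
 * Usage sketch (illustrative only): a caller that advanced the iterator
 * speculatively can wind it back on failure:
 *
 *	size_t n = copy_to_iter(buf, len, iter);	// advances iter by n
 *
 *	if (send_failed) {
 *		iov_iter_revert(iter, n);	// undo the advance
 *		return -EIO;
 *	}
 */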

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
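
/*
 * Usage sketch (illustrative only): describing two kernel buffers, e.g.
 * a header plus payload, as the source of a single write:
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, vec, 2, hdr_len + body_len);
 */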

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from evaporating, either by taking a ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
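
/*
 * Usage sketch (illustrative only): network filesystems point such an
 * iterator at an inode's pagecache so received data lands directly in
 * the (already ref'd or locked) cache pages:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
 */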

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
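
/*
 * Usage sketch (illustrative only): draining bytes from a source, e.g.
 * skipping the unread tail of a message, without storing them anywhere:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, bytes_to_skip);
 *	// hand &iter to the usual receive path; the data is dropped
 */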

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any address or length intersects with the provided masks;
 *	true otherwise
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
			if (i->iov_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
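
/*
 * Usage sketch (illustrative only): a direct-I/O path rejecting requests
 * whose segment addresses or lengths aren't logical-block aligned:
 *
 *	unsigned int mask = bdev_logical_block_size(bdev) - 1;
 *
 *	if (!iov_iter_is_aligned(iter, mask, mask))
 *		return -EINVAL;
 */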

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head)))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				size_t off)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t left = maxsize;

	if (off) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head - 1);

		get_page(*pages++ = buf->page);
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			buf->len += maxsize;
			return maxsize;
		}
		buf->len = PAGE_SIZE;
	}
	while (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
		struct page *page = push_anon(pipe,
					      min_t(ssize_t, left, PAGE_SIZE));
		if (!page)
			break;
		get_page(*pages++ = page);
		left -= PAGE_SIZE;
		if (left <= 0)
			return maxsize;
	}
	return maxsize - left ? : -EFAULT;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, *start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

/* must be done on a non-empty ITER_UBUF or ITER_IOVEC */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on a non-empty ITER_BVEC */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		for (int k = 0; k < n; k++)
			get_page(*pages++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
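
/*
 * Usage sketch (illustrative only): pinning the front of the iterator
 * for zero-copy I/O; the caller owns the page references and the data
 * starts at 'off' within pages[0]:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages(iter, pages, iov_iter_count(iter), 16, &off);
 *	if (n <= 0)
 *		return n ? n : -EFAULT;
 *	// ... do the I/O, then put_page() each pinned page ...
 */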

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, *start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		for (int k = 0; k < n; k++)
			get_page(*p++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
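
/*
 * Usage sketch (illustrative only): typical readv(2)-style setup in a
 * syscall; kfree(iov) is safe whether or not the stack array was used:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_io(&iter);	// hypothetical consumer
 *	kfree(iov);
 */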

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may
 * have advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
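
/*
 * Usage sketch (illustrative only): pairing with iov_iter_save_state()
 * (declared in <linux/uio.h>) so an operation that may partially consume
 * the iterator can be retried from the same starting point:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_attempt(iter);		// hypothetical; may advance iter
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);	// retry later from here
 */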