/* lib/iov_iter.c */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

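/*
 * Usage sketch (illustrative, not part of the original file): the I, B and
 * K arguments are expressions evaluated once per iovec, bio_vec and kvec
 * segment respectively, with v bound to the current segment.  An iovec
 * step must evaluate to the number of bytes it failed to process (0 on
 * success), so multi-statement steps are written as statement expressions.
 * A hypothetical byte counter would look like:
 *
 *	size_t seen = 0;
 *	iterate_all_kinds(i, n, v,
 *		({ seen += v.iov_len; 0; }),
 *		seen += v.bv_len,
 *		seen += v.iov_len
 *	)
 */
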
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

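/*
 * Usage sketch (illustrative): buffered-write paths typically fault the
 * source pages in up front, before entering a section that must not take
 * page faults, e.g.:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 */
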
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

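/*
 * Usage sketch (hypothetical caller): wrap a user iovec array for a
 * read-style operation (data flows towards userspace), then copy kernel
 * data into it:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, uvec, nr_segs, total_len);
 *	copied = copy_to_iter(kbuf, kbuf_len, &iter);
 */
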
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

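/*
 * Usage sketch (illustrative): the _full variants are all-or-nothing.  On
 * failure the iterator is left unadvanced, so callers can simply bail out,
 * e.g. when pulling a fixed-size header off an iterator:
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), &iter))
 *		return -EFAULT;
 */
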
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head = compound_head(page);
	size_t v = n + offset + page_address(page) - page_address(head);

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

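/*
 * Usage sketch (illustrative): undo whatever a failed or short operation
 * consumed, measured via iov_iter_count():
 *
 *	size_t before = iov_iter_count(&iter);
 *	ret = do_op(&iter);			// hypothetical callee
 *	if (ret < 0)
 *		iov_iter_revert(&iter, before - iov_iter_count(&iter));
 */
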
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

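/*
 * Usage sketch (illustrative): at this point in the tree the direction
 * argument still carries the iterator type, as the BUG_ON() above
 * enforces, so kernel-buffer callers write:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 */
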
void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

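/*
 * Usage sketch (illustrative): pin up to one page of the iterator and
 * remember the offset of the data within that page:
 *
 *	struct page *pages[1];
 *	size_t off;
 *	ssize_t n = iov_iter_get_pages(&iter, pages, PAGE_SIZE, 1, &off);
 *
 *	if (n <= 0)
 *		return n ? n : -EFAULT;
 *	// ... use pages[0] at offset 'off' for n bytes, then put_page() ...
 */
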
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1239
a604ec7e
AV
1240size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1241 struct iov_iter *i)
1242{
1243 char *to = addr;
1244 __wsum sum, next;
1245 size_t off = 0;
a604ec7e 1246 sum = *csum;
241699cd
AV
1247 if (unlikely(i->type & ITER_PIPE)) {
1248 WARN_ON(1);
1249 return 0;
1250 }
a604ec7e
AV
1251 iterate_and_advance(i, bytes, v, ({
1252 int err = 0;
cbbd26b8 1253 next = csum_and_copy_from_user(v.iov_base,
a604ec7e
AV
1254 (to += v.iov_len) - v.iov_len,
1255 v.iov_len, 0, &err);
1256 if (!err) {
1257 sum = csum_block_add(sum, next, off);
1258 off += v.iov_len;
1259 }
1260 err ? v.iov_len : 0;
1261 }), ({
1262 char *p = kmap_atomic(v.bv_page);
1263 next = csum_partial_copy_nocheck(p + v.bv_offset,
1264 (to += v.bv_len) - v.bv_len,
1265 v.bv_len, 0);
1266 kunmap_atomic(p);
1267 sum = csum_block_add(sum, next, off);
1268 off += v.bv_len;
1269 }),({
1270 next = csum_partial_copy_nocheck(v.iov_base,
1271 (to += v.iov_len) - v.iov_len,
1272 v.iov_len, 0);
1273 sum = csum_block_add(sum, next, off);
1274 off += v.iov_len;
1275 })
1276 )
1277 *csum = sum;
1278 return bytes;
1279}
1280EXPORT_SYMBOL(csum_and_copy_from_iter);
1281
cbbd26b8
AV
1282bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1283 struct iov_iter *i)
1284{
1285 char *to = addr;
1286 __wsum sum, next;
1287 size_t off = 0;
1288 sum = *csum;
1289 if (unlikely(i->type & ITER_PIPE)) {
1290 WARN_ON(1);
1291 return false;
1292 }
1293 if (unlikely(i->count < bytes))
1294 return false;
1295 iterate_all_kinds(i, bytes, v, ({
1296 int err = 0;
1297 next = csum_and_copy_from_user(v.iov_base,
1298 (to += v.iov_len) - v.iov_len,
1299 v.iov_len, 0, &err);
1300 if (err)
1301 return false;
1302 sum = csum_block_add(sum, next, off);
1303 off += v.iov_len;
1304 0;
1305 }), ({
1306 char *p = kmap_atomic(v.bv_page);
1307 next = csum_partial_copy_nocheck(p + v.bv_offset,
1308 (to += v.bv_len) - v.bv_len,
1309 v.bv_len, 0);
1310 kunmap_atomic(p);
1311 sum = csum_block_add(sum, next, off);
1312 off += v.bv_len;
1313 }),({
1314 next = csum_partial_copy_nocheck(v.iov_base,
1315 (to += v.iov_len) - v.iov_len,
1316 v.iov_len, 0);
1317 sum = csum_block_add(sum, next, off);
1318 off += v.iov_len;
1319 })
1320 )
1321 *csum = sum;
1322 iov_iter_advance(i, bytes);
1323 return true;
1324}
1325EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1326
36f7a8a4 1327size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
a604ec7e
AV
1328 struct iov_iter *i)
1329{
36f7a8a4 1330 const char *from = addr;
a604ec7e
AV
1331 __wsum sum, next;
1332 size_t off = 0;
a604ec7e 1333 sum = *csum;
241699cd
AV
1334 if (unlikely(i->type & ITER_PIPE)) {
1335 WARN_ON(1); /* for now */
1336 return 0;
1337 }
a604ec7e
AV
1338 iterate_and_advance(i, bytes, v, ({
1339 int err = 0;
1340 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
cbbd26b8 1341 v.iov_base,
a604ec7e
AV
1342 v.iov_len, 0, &err);
1343 if (!err) {
1344 sum = csum_block_add(sum, next, off);
1345 off += v.iov_len;
1346 }
1347 err ? v.iov_len : 0;
1348 }), ({
1349 char *p = kmap_atomic(v.bv_page);
1350 next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
1351 p + v.bv_offset,
1352 v.bv_len, 0);
1353 kunmap_atomic(p);
1354 sum = csum_block_add(sum, next, off);
1355 off += v.bv_len;
1356 }),({
1357 next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
1358 v.iov_base,
1359 v.iov_len, 0);
1360 sum = csum_block_add(sum, next, off);
1361 off += v.iov_len;
1362 })
1363 )
1364 *csum = sum;
1365 return bytes;
1366}
1367EXPORT_SYMBOL(csum_and_copy_to_iter);
1368
62a8067a
AV
1369int iov_iter_npages(const struct iov_iter *i, int maxpages)
1370{
e0f2dc40
AV
1371 size_t size = i->count;
1372 int npages = 0;
1373
1374 if (!size)
1375 return 0;
1376
241699cd
AV
1377 if (unlikely(i->type & ITER_PIPE)) {
1378 struct pipe_inode_info *pipe = i->pipe;
1379 size_t off;
1380 int idx;
1381
1382 if (!sanity(i))
1383 return 0;
1384
1385 data_start(i, &idx, &off);
1386 /* some of this one + all after this one */
1387 npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
1388 if (npages >= maxpages)
1389 return maxpages;
1390 } else iterate_all_kinds(i, size, v, ({
e0f2dc40
AV
1391 unsigned long p = (unsigned long)v.iov_base;
1392 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1393 - p / PAGE_SIZE;
1394 if (npages >= maxpages)
1395 return maxpages;
1396 0;}),({
1397 npages++;
1398 if (npages >= maxpages)
1399 return maxpages;
a280455f
AV
1400 }),({
1401 unsigned long p = (unsigned long)v.iov_base;
1402 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1403 - p / PAGE_SIZE;
1404 if (npages >= maxpages)
1405 return maxpages;
e0f2dc40
AV
1406 })
1407 )
1408 return npages;
62a8067a 1409}
f67da30c 1410EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
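
/*
 * Usage sketch (canonical caller pattern, cf. do_readv()): try the
 * on-stack array first and kfree() unconditionally afterwards:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use iter ...
 *	kfree(iov);
 */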

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
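
/*
 * Usage sketch (hypothetical callback): visit each kernel-mapped range as
 * a kvec; user-backed (iovec) iterators yield -EINVAL above, since their
 * pages cannot simply be kmap()ed here:
 *
 *	static int total_cb(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	err = iov_iter_for_each_range(&iter, bytes, total_cb, &total);
 */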