// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

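/*
 * Note on the iterate_*() helpers above: each expansion hands the STEP
 * expression a (base, len) window into the current segment and expects
 * STEP to evaluate to the number of bytes it could *not* process (0 on
 * complete success), in the style of copy_to_user().  "off" accumulates
 * the bytes actually handled, and the iterator position is updated from
 * it afterwards.
 */
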
#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = iter_iov(i);	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - iter_iov(i);	\
			i->__iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyout_nofault(void __user *to, const void *from, size_t n)
{
	long res;

	if (should_fail_usercopy())
		return n;

	res = copy_to_user_nofault(to, from, n);

	return res < 0 ? n : res;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	size_t res = n;

	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	return res;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

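/*
 * Illustrative sketch of the usual caller pattern (as seen in generic
 * write paths; not part of this file): fault the user pages in first,
 * then copy with page faults disabled and retry on a short copy:
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
 *		return -EFAULT;
 *	...
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	if (unlikely(copied == 0))
 *		goto again;	// fault in and retry
 */
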
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.copy_mc = false,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

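/*
 * Illustrative sketch (names are illustrative): a read(2)-style path wraps
 * the caller's iovec array in a destination iterator and copies into it;
 * the copy may be short if a user page faults:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, ITER_DEST, iov, nr_segs, total_len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */
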
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again.  Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies.  Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
			      size_t size)
{
	if (iov_iter_is_copy_mc(i))
		return (void *)copy_mc_to_kernel(to, from, size);
	return memcpy(to, from, size);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy_from_iter(i, addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

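/*
 * Illustrative sketch: a filesystem read path pushing page-cache contents
 * to whatever the iterator describes, stopping early on a fault:
 *
 *	size_t n = copy_page_to_iter(page, offset, bytes, iter);
 *	if (n < bytes)
 *		// short copy: e.g. the destination user page faulted
 */
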
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		iterate_and_advance(i, n, base, len, off,
			copyout_nofault(base, kaddr + offset + off, len),
			memcpy(base, kaddr + offset + off, len)
		)
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (!page_copy_sane(page, offset, bytes)) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (WARN_ON_ONCE(!i->data_source)) {
		kunmap_atomic(kaddr);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy_from_iter(i, p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

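/*
 * Illustrative sketch of the advance/revert contract: a caller that
 * consumed data speculatively can wind the iterator back by exactly the
 * number of bytes it advanced:
 *
 *	size_t n = copy_to_iter(buf, len, i);	// advances i by n
 *	if (some_error)
 *		iov_iter_revert(i, n);		// i is as before the copy
 */
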
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.copy_mc = false,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

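/*
 * Illustrative sketch: sending a kernel buffer through an interface that
 * takes an iov_iter:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);
 */
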
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.copy_mc = false,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

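/*
 * Illustrative sketch: describing a single page fragment, e.g. for
 * zero-copy I/O (assumes a kernel that provides bvec_set_page()):
 *
 *	struct bio_vec bv;
 *	struct iov_iter iter;
 *
 *	bvec_set_page(&bv, page, len, offset);
 *	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, len);
 */
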
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by the caller taking a ref on them or locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.copy_mc = false,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.copy_mc = false,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;
		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;
		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		   size_t maxsize, unsigned maxpages, size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

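/*
 * Illustrative sketch of caller-side cleanup: the "2" variants advance the
 * iterator and return pages with references held, so the caller drops them
 * (and frees the kvmalloc'd array) when done:
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t n = iov_iter_get_pages_alloc2(i, &pages, maxsize, &off);
 *
 *	if (n > 0) {
 *		for (int k = 0; k < DIV_ROUND_UP(off + n, PAGE_SIZE); k++)
 *			put_page(pages[k]);
 *		kvfree(pages);
 *	}
 */
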
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (unlikely(iov_iter_is_discard(i))) {
		// can't use csum_and_memcpy() for that one - data is not copied
		csstate->csum = csum_block_add(csstate->csum,
					       csum_partial(addr, bytes, 0),
					       csstate->off);
		csstate->off += bytes;
		return bytes;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

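/*
 * Illustrative sketch: sizing a bio before extracting pages from the
 * iterator, capped at the bio's vector limit:
 *
 *	unsigned int nr_pages = iov_iter_npages(iter, BIO_MAX_VECS);
 *
 *	bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
 */
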
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	*iovp = NULL;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

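/*
 * Illustrative sketch of the canonical calling sequence (this is how
 * readv/writev-style syscall paths use it):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... do I/O on &iter ...
 *	kfree(iov);	// safe whether or not the on-stack array was used
 */
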
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page *page, **p;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	pgoff_t index = pos >> PAGE_SHIFT;
	XA_STATE(xas, i->xarray, index);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = find_subpage(page, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	size_t skip = i->iov_offset, offset;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		maxsize = min(maxsize, i->bvec->bv_len - skip);
		if (maxsize)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}

	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	offset = skip % PAGE_SIZE;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;
	for (k = 0; k < maxpages; k++)
		p[k] = page + k;

	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		maxsize = min(maxsize, i->kvec->iov_len - skip);
		if (maxsize)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + maxsize;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them.  This should only be used if the iterator is user-backed
 * (IOVEC/UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator.  The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
 *      merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
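
/*
 * Illustrative sketch of caller-side cleanup for iov_iter_extract_pages():
 * user-backed iterators hand back pinned pages, the other types do not, so
 * the cleanup is keyed off iov_iter_extract_will_pin():
 *
 *	if (iov_iter_extract_will_pin(i)) {
 *		while (npages--)
 *			unpin_user_page(pages[npages]);
 *	}
 *	kvfree(pages);
 */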