// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>

static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = raw_copy_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
				 size_t len, void *from, void *priv2)
{
	ssize_t res;

	if (should_fail_usercopy())
		return len;

	from += progress;
	res = copy_to_user_nofault(iter_to, from, len);
	return res < 0 ? len : res;
}

static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{
	size_t res = len;

	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_from, len)) {
		to += progress;
		instrument_copy_from_user_before(to, iter_from, len);
		res = raw_copy_from_user(to, iter_from, len);
		instrument_copy_from_user_after(to, iter_from, len, res);
	}
	return res;
}

static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
		      size_t len, void *from, void *priv2)
{
	memcpy(iter_to, from + progress, len);
	return 0;
}

static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
			size_t len, void *to, void *priv2)
{
	memcpy(to + progress, iter_from, len);
	return 0;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
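
/*
 * Example (editorial sketch, not part of this file): a buffered write path
 * typically pre-faults the source iterator before copying with page faults
 * disabled, retrying until progress is made or the whole range proves
 * unreadable.  The helpers named below are real; the surrounding loop is
 * illustrative only.
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *		status = -EFAULT;	// nothing could be faulted in
 *		break;
 *	}
 *	copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
 */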

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
		   const struct iovec *iov, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static __always_inline
size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
			    size_t len, void *from, void *priv2)
{
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = copy_mc_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	return copy_mc_to_kernel(iter_to, from + progress, len);
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies.  Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter, memcpy_from_iter);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
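
/*
 * Example (editorial sketch): the copy helpers return "bytes done" rather
 * than an error code, so callers compare against the requested size.
 * copy_to_iter()/copy_from_iter() are the inline wrappers around the
 * functions above; struct foo_hdr is hypothetical.
 *
 *	struct foo_hdr hdr;
 *
 *	if (copy_from_iter(&hdr, sizeof(hdr), i) != sizeof(hdr))
 *		return -EFAULT;		// short copy: bad user address
 */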

static __always_inline
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
}

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_nocache,
				   memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static __always_inline
size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
				      size_t len, void *to, void *priv2)
{
	return __copy_from_user_flushcache(to + progress, iter_from, len);
}

static __always_inline
size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	memcpy_flushcache(to + progress, iter_from, len);
	return 0;
}

/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_flushcache,
				   memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
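
/*
 * Example (editorial sketch): a read path that has a page (e.g. from the
 * page cache) hands it to the iterator without caring whether the
 * destination is user memory, a kvec or a bvec.  A short return means the
 * iterator faulted and the caller should stop early.
 *
 *	size_t n = copy_page_to_iter(page, offset, bytes, iter);
 *
 *	ret += n;
 *	if (n < bytes)
 *		return ret ? ret : -EFAULT;
 */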

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		n = iterate_and_advance(i, n, kaddr + offset,
					copy_to_user_iter_nofault,
					memcpy_to_iter);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static __always_inline
size_t zero_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *priv, void *priv2)
{
	return clear_user(iter_to, len);
}

static __always_inline
size_t zero_to_iter(void *iter_to, size_t progress,
		    size_t len, void *priv, void *priv2)
{
	memset(iter_to, 0, len);
	return 0;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, NULL,
				   zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
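
/*
 * Example (editorial sketch): filling a read of a hole with zeroes, as
 * sparse-file and device read paths do.  Like the copy helpers,
 * iov_iter_zero() returns the number of bytes actually zeroed.
 *
 *	if (iov_iter_zero(len, iter) != len)
 *		return -EFAULT;
 */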

size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
				   size_t bytes, struct iov_iter *i)
{
	size_t n, copied = 0;

	if (!page_copy_sane(&folio->page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	do {
		char *to = kmap_local_folio(folio, offset);

		n = bytes - copied;
		if (folio_test_partial_kmap(folio) &&
		    n > PAGE_SIZE - offset_in_page(offset))
			n = PAGE_SIZE - offset_in_page(offset);

		pagefault_disable();
		n = __copy_from_iter(to, n, i);
		pagefault_enable();
		kunmap_local(to);
		copied += n;
		offset += n;
	} while (copied != bytes && n > 0);

	return copied;
}
EXPORT_SYMBOL(copy_folio_from_iter_atomic);
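
/*
 * Example (editorial sketch): the classic buffered-write loop pairs this
 * helper with fault_in_iov_iter_readable(), since the copy itself runs with
 * page faults disabled and may legitimately return short:
 *
 *	do {
 *		if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *			return -EFAULT;
 *		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
 *	} while (copied == 0);	// retry after faulting pages back in
 *
 * Compare generic_perform_write(), which wraps the same skeleton in locking
 * and ->write_begin()/->write_end() handling.
 */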

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
{
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;

	if (!i->count)
		return;
	i->count -= size;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
	}

	size += i->iov_offset; /* From beginning of current segment. */
	do {
		size_t fsize = folioq_folio_size(folioq, slot);

		if (likely(size < fsize))
			break;
		size -= fsize;
		slot++;
		if (slot >= folioq_nr_slots(folioq) && folioq->next) {
			folioq = folioq->next;
			slot = 0;
		}
	} while (size);

	i->iov_offset = size;
	i->folioq_slot = slot;
	i->folioq = folioq;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_folioq(i)) {
		iov_iter_folioq_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
{
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;

	for (;;) {
		size_t fsize;

		if (slot == 0) {
			folioq = folioq->prev;
			slot = folioq_nr_slots(folioq);
		}
		slot--;

		fsize = folioq_folio_size(folioq, slot);
		if (unroll <= fsize) {
			i->iov_offset = fsize - unroll;
			break;
		}
		unroll -= fsize;
	}

	i->folioq_slot = slot;
	i->folioq = folioq;
}

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else if (iov_iter_is_folioq(i)) {
		i->iov_offset = 0;
		iov_iter_folioq_revert(i, unroll);
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
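
/*
 * Example (editorial sketch): advance and revert are symmetric, which is how
 * callers back out after a partially failed submission.  unroll must not
 * take the iterator behind its starting point.
 *
 *	size_t before = iov_iter_count(iter);
 *
 *	ret = do_submit(iter);			// hypothetical; may consume iter
 *	if (ret < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 *
 * The revert-by-consumed-bytes pattern is the point here.
 */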

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	if (unlikely(iov_iter_is_folioq(i)))
		return !i->count ? 0 :
			umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
		   const struct kvec *kvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
		   const struct bio_vec *bvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);
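
/*
 * Example (editorial sketch): kernel-memory and page-based iterators are
 * built the same way as iovec ones, only the segment type differs.
 * ITER_DEST marks the buffer as the destination of the transfer.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct bio_vec bv = { .bv_page = page, .bv_len = len, .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	// or: iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
 */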

/**
 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @folioq: The starting point in the folio queue.
 * @first_slot: The first slot in the folio queue to use
 * @offset: The offset into the folio in the first slot to start at
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
			  const struct folio_queue *folioq, unsigned int first_slot,
			  unsigned int offset, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_FOLIOQ,
		.data_source = direction,
		.folioq = folioq,
		.folioq_slot = first_slot,
		.count = count,
		.iov_offset = offset,
	};
}
EXPORT_SYMBOL(iov_iter_folio_queue);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
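
/*
 * Example (editorial sketch): a discard iterator is handy for draining data
 * that must be consumed but not stored, e.g. skipping part of a socket or
 * pipe payload; copies into it simply advance the count.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, ITER_DEST, to_skip);
 *	ret = do_receive(&iter);	// hypothetical consumer
 */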

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	const struct iovec *iov = iter_iov(i);
	size_t size = i->count;
	size_t skip = i->iov_offset;

	do {
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		iov++;
		size -= len;
		skip = 0;
	} while (size);

	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	const struct bio_vec *bvec = i->bvec;
	unsigned skip = i->iov_offset;
	size_t size = i->count;

	do {
		size_t len = bvec->bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
			return false;

		bvec++;
		size -= len;
		skip = 0;
	} while (size);

	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	/* With both xarray and folioq types, we're dealing with whole folios. */
	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}
	if (iov_iter_is_folioq(i)) {
		if (i->count & len_mask)
			return false;
		if (i->iov_offset & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
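
/*
 * Example (editorial sketch): direct-I/O paths use this to reject buffers
 * the hardware cannot address, masking addresses by the DMA alignment and
 * lengths by the logical block size (both arguments are masks, i.e.
 * alignment - 1):
 *
 *	if (!iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
 *				 bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;
 *
 * Compare the equivalent check in block/fops.c.
 */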

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	const struct iovec *iov = iter_iov(i);
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;

	do {
		size_t len = iov->iov_len - skip;
		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
		}
		iov++;
		skip = 0;
	} while (size);
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	const struct bio_vec *bvec = i->bvec;
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;

	do {
		size_t len = bvec->bv_len - skip;
		res |= (unsigned long)bvec->bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		bvec++;
		size -= len;
		skip = 0;
	} while (size);

	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	/* With both xarray and folioq types, we're dealing with whole folios. */
	if (iov_iter_is_folioq(i))
		return i->iov_offset | i->count;
	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;
		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
				     struct page ***ppages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	const struct folio_queue *folioq = iter->folioq;
	struct page **pages;
	unsigned int slot = iter->folioq_slot;
	size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
		if (WARN_ON(iov_offset != 0))
			return -EIO;
	}

	maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages);
	if (!maxpages)
		return -ENOMEM;
	*_start_offset = iov_offset & ~PAGE_MASK;
	pages = *ppages;

	for (;;) {
		struct folio *folio = folioq_folio(folioq, slot);
		size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
		size_t part = PAGE_SIZE - offset % PAGE_SIZE;

		if (offset < fsize) {
			part = umin(part, umin(maxsize - extracted, fsize - offset));
			count -= part;
			iov_offset += part;
			extracted += part;

			*pages = folio_page(folio, offset / PAGE_SIZE);
			get_page(*pages);
			pages++;
			maxpages--;
		}

		if (maxpages == 0 || extracted >= maxsize)
			break;

		if (iov_offset >= fsize) {
			iov_offset = 0;
			slot++;
			if (slot == folioq_nr_slots(folioq) && folioq->next) {
				folioq = folioq->next;
				slot = 0;
			}
		}
	}

	iter->count = count;
	iter->iov_offset = iov_offset;
	iter->folioq = folioq;
	iter->folioq_slot = slot;
	return extracted;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct folio *folio;
	unsigned int ret = 0;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;

		/* Has the folio moved or been split? */
		if (unlikely(folio != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = folio_file_page(folio, xas.xa_index);
		folio_get(folio);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++) {
			struct folio *folio = page_folio(page + k);
			p[k] = page + k;
			if (!folio_test_slab(folio))
				folio_get(folio);
		}
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_folioq(i))
		return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		   size_t maxsize, unsigned maxpages, size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
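
/*
 * Example (editorial sketch): typical use takes page references, so each
 * page must be released with put_page() and the allocated array freed with
 * kvfree() (it may come from kvmalloc_array()):
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages_alloc2(iter, &pages, maxsize, &off);
 *	if (n < 0)
 *		return n;
 *	// data occupies [off, off + n) across pages[]
 *	for (k = 0; k < DIV_ROUND_UP(off + n, PAGE_SIZE); k++)
 *		put_page(pages[k]);
 *	kvfree(pages);
 *
 * Note that the iterator has already been advanced by n bytes on return.
 */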

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_folioq(i)) {
		unsigned offset = i->iov_offset % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, u32 nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT;
	u32 i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	*iovp = NULL;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
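
/*
 * Example (editorial sketch): save/restore brackets an operation that may
 * need to be replayed, e.g. an io_uring-style retry after -EAGAIN:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_io(iter);		// hypothetical; may advance iter
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		// resubmit with the iterator back at its saved position
 *	}
 */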

/*
 * Extract a list of contiguous pages from an ITER_FOLIOQ iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	const struct folio_queue *folioq = i->folioq;
	struct page **p;
	unsigned int nr = 0;
	size_t extracted = 0, offset, slot = i->folioq_slot;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
		if (WARN_ON(i->iov_offset != 0))
			return -EIO;
	}

	offset = i->iov_offset & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	for (;;) {
		struct folio *folio = folioq_folio(folioq, slot);
		size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
		size_t part = PAGE_SIZE - offset % PAGE_SIZE;

		if (offset < fsize) {
			part = umin(part, umin(maxsize - extracted, fsize - offset));
			i->count -= part;
			i->iov_offset += part;
			extracted += part;

			p[nr++] = folio_page(folio, offset / PAGE_SIZE);
		}

		if (nr >= maxpages || extracted >= maxsize)
			break;

		if (i->iov_offset >= fsize) {
			i->iov_offset = 0;
			slot++;
			if (slot == folioq_nr_slots(folioq) && folioq->next) {
				folioq = folioq->next;
				slot = 0;
			}
		}
	}

	i->folioq = folioq;
	i->folioq_slot = slot;
	return extracted;
}

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page **p;
	struct folio *folio;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;

		/* Has the folio moved or been split? */
		if (unlikely(folio != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = folio_file_page(folio, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_BVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	size_t skip = i->iov_offset, size = 0;
	struct bvec_iter bi;
	int k = 0;

	if (i->nr_segs == 0)
		return 0;

	if (i->iov_offset == i->bvec->bv_len) {
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}
	bi.bi_idx = 0;
	bi.bi_size = maxsize;
	bi.bi_bvec_done = skip;

	maxpages = want_pages_array(pages, maxsize, skip, maxpages);

	while (bi.bi_size && bi.bi_idx < i->nr_segs) {
		struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);

		/*
		 * The iov_iter_extract_pages interface only allows an offset
		 * into the first page.  Break out of the loop if we see an
		 * offset into subsequent pages, the caller will have to call
		 * iov_iter_extract_pages again for the remainder.
		 */
		if (k) {
			if (bv.bv_offset)
				break;
		} else {
			*offset0 = bv.bv_offset;
		}

		(*pages)[k++] = bv.bv_page;
		size += bv.bv_len;

		if (k >= maxpages)
			break;

		/*
		 * We are done when the end of the bvec doesn't align to a page
		 * boundary as that would create a hole in the returned space.
		 * The caller will handle this with another call to
		 * iov_iter_extract_pages.
		 */
		if (bv.bv_offset + bv.bv_len != PAGE_SIZE)
			break;

		bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
	}

	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->kvec->iov_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + size;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them.  This should only be used if the iterator is user-backed
 * (ITER_IOVEC/ITER_UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator.  The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the
 *      pages are merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_folioq(i))
		return iov_iter_extract_folioq_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
1930EXPORT_SYMBOL_GPL(iov_iter_extract_pages);