iov_iter.c: convert iov_iter_get_pages_alloc() to iterate_all_kinds
[linux-2.6-block.git] / mm / iov_iter.c
CommitLineData
4f18cd31
AV
1#include <linux/export.h>
2#include <linux/uio.h>
3#include <linux/pagemap.h>
91f79c43
AV
4#include <linux/slab.h>
5#include <linux/vmalloc.h>
4f18cd31 6
04a31165
AV
/*
 * Walk the iovec array of iterator @i, handing each contiguous user
 * chunk to STEP as __v (a struct iovec).  STEP must evaluate to the
 * number of bytes it FAILED to process; a non-zero result stops the
 * walk early (short copy).  On exit n is rewritten to the number of
 * bytes actually consumed and skip is the offset into the final iovec.
 * NOTE: multiple-evaluation macro — arguments must be side-effect free.
 */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	/* first segment may start mid-iovec at skip */	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	/* continue while nothing failed and bytes remain */ \
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}
34
/*
 * Walk the bio_vec array of iterator @i, handing each page fragment to
 * STEP as __v (a struct bio_vec).  Unlike iterate_iovec, STEP cannot
 * fail (page-backed memory never faults), so its value is discarded
 * and the walk always consumes the full n bytes.  On exit skip is the
 * offset into the final bio_vec.
 * NOTE: multiple-evaluation macro — arguments must be side-effect free.
 */
#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	/* first segment may start mid-bvec at skip */	\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}
59
/*
 * Dispatch on the iterator flavour: run step expression B over each
 * bio_vec segment of an ITER_BVEC iterator, or step expression I over
 * each iovec segment otherwise.  Does NOT advance the iterator; n is
 * rewritten to the number of bytes actually processed.
 */
#define iterate_all_kinds(i, n, v, I, B) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}
72
7ce2a91e
AV
/*
 * Like iterate_all_kinds, but additionally advances the iterator past
 * the bytes processed: count is decremented, and iov/bvec, nr_segs and
 * iov_offset are updated to point at the first unconsumed byte.
 */
#define iterate_and_advance(i, n, v, I, B) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec;			\
		struct bio_vec v;				\
		iterate_bvec(i, n, v, bvec, skip, (B))		\
		/* step past a fully-consumed final segment */	\
		if (skip == bvec->bv_len) {			\
			bvec++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= bvec - i->bvec;			\
		i->bvec = bvec;					\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
		if (skip == iov->iov_len) {			\
			iov++;					\
			skip = 0;				\
		}						\
		i->nr_segs -= iov - i->iov;			\
		i->iov = iov;					\
	}							\
	i->count -= n;						\
	i->iov_offset = skip;					\
}
99
c35e0248
MW
/*
 * Copy up to @bytes from kernel buffer @from into the user memory
 * described by iovec-backed iterator @i, advancing the iterator past
 * whatever was copied.  Returns the number of bytes copied, which may
 * be short if a user page is inaccessible (__copy_to_user faults).
 */
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	/* first segment may start mid-iovec at iov_offset */
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	/* keep going while nothing faulted (left == 0) and bytes remain */
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	/* advance the iterator past everything we consumed */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
144
/*
 * Copy up to @bytes from the user memory described by iovec-backed
 * iterator @i into kernel buffer @to, advancing the iterator past
 * whatever was copied.  Returns the number of bytes copied, which may
 * be short if a user page is inaccessible (__copy_from_user faults).
 */
static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	/* first segment may start mid-iovec at iov_offset */
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	/* keep going while nothing faulted (left == 0) and bytes remain */
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	/* advance the iterator past everything we consumed */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
189
/*
 * Copy @bytes from @page starting at @offset into the user memory of
 * iovec-backed iterator @i.  Fast path: if the first user chunk is
 * already faulted in, copy under kmap_atomic with the non-faulting
 * __copy_to_user_inatomic.  If that copies short, fall back to a
 * sleeping kmap + __copy_to_user for the remainder.  Advances the
 * iterator and returns the number of bytes copied (may be short).
 */
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		/* atomic copy stopped part-way; recompute resume point */
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	/* advance the iterator past everything we consumed */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
4f18cd31 270
/*
 * Copy @bytes from the user memory of iovec-backed iterator @i into
 * @page starting at @offset.  Mirror image of copy_page_to_iter_iovec:
 * try the kmap_atomic + __copy_from_user_inatomic fast path first,
 * fall back to sleeping kmap + __copy_from_user if the user pages are
 * not resident.  Advances the iterator and returns the number of bytes
 * copied (may be short on fault).
 */
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		/* atomic copy stopped part-way; recompute resume point */
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	/* advance the iterator past everything we consumed */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
f0d1bec9 351
c35e0248
MW
/*
 * Zero up to @bytes of the user memory described by iovec-backed
 * iterator @i, advancing the iterator.  Returns the number of bytes
 * zeroed, which may be short if a user page is inaccessible.
 */
static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	/* first segment may start mid-iovec at iov_offset */
	copy = min(bytes, iov->iov_len - skip);

	left = __clear_user(buf, copy);
	copy -= left;
	skip += copy;
	bytes -= copy;

	/* keep going while nothing faulted (left == 0) and bytes remain */
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __clear_user(buf, copy);
		copy -= left;
		skip = copy;
		bytes -= copy;
	}

	/* advance the iterator past everything we consumed */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
395
4f18cd31
AV
396/*
397 * Fault in the first iovec of the given iov_iter, to a maximum length
398 * of bytes. Returns 0 on success, or non-zero if the memory could not be
399 * accessed (ie. because it is an invalid address).
400 *
401 * writev-intensive code may want this to prefault several iovecs -- that
402 * would be possible (callers must not rely on the fact that _only_ the
403 * first iovec will be faulted with the current implementation).
404 */
405int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
406{
62a8067a
AV
407 if (!(i->type & ITER_BVEC)) {
408 char __user *buf = i->iov->iov_base + i->iov_offset;
409 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
410 return fault_in_pages_readable(buf, bytes);
411 }
412 return 0;
4f18cd31
AV
413}
414EXPORT_SYMBOL(iov_iter_fault_in_readable);
415
71d8e532
AV
416void iov_iter_init(struct iov_iter *i, int direction,
417 const struct iovec *iov, unsigned long nr_segs,
418 size_t count)
419{
420 /* It will get better. Eventually... */
421 if (segment_eq(get_fs(), KERNEL_DS))
62a8067a 422 direction |= ITER_KVEC;
71d8e532
AV
423 i->type = direction;
424 i->iov = iov;
425 i->nr_segs = nr_segs;
426 i->iov_offset = 0;
427 i->count = count;
428}
429EXPORT_SYMBOL(iov_iter_init);
7b2c99d1 430
62a8067a
AV
431static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
432{
433 char *from = kmap_atomic(page);
434 memcpy(to, from + offset, len);
435 kunmap_atomic(from);
436}
437
438static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
439{
440 char *to = kmap_atomic(page);
441 memcpy(to + offset, from, len);
442 kunmap_atomic(to);
443}
444
c35e0248
MW
445static void memzero_page(struct page *page, size_t offset, size_t len)
446{
447 char *addr = kmap_atomic(page);
448 memset(addr + offset, 0, len);
449 kunmap_atomic(addr);
450}
451
/*
 * Copy up to @bytes from kernel buffer @from into the pages of
 * bio_vec-backed iterator @i, advancing the iterator.  Page-backed
 * memory cannot fault, so the clamped request is always copied in
 * full.  Returns the number of bytes copied.
 */
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	/* first segment may start mid-bvec at iov_offset */
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	/* advance the iterator past everything we consumed */
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}
490
c35e0248 491static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
62a8067a
AV
492{
493 size_t skip, copy, wanted;
494 const struct bio_vec *bvec;
62a8067a
AV
495
496 if (unlikely(bytes > i->count))
497 bytes = i->count;
498
499 if (unlikely(!bytes))
500 return 0;
501
502 wanted = bytes;
503 bvec = i->bvec;
504 skip = i->iov_offset;
505
62a8067a
AV
506 copy = min(bytes, bvec->bv_len - skip);
507
508 memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
509
510 to += copy;
511 skip += copy;
512 bytes -= copy;
513
514 while (bytes) {
515 bvec++;
516 copy = min(bytes, (size_t)bvec->bv_len);
517 memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
518 skip = copy;
519 to += copy;
520 bytes -= copy;
521 }
62a8067a
AV
522 if (skip == bvec->bv_len) {
523 bvec++;
524 skip = 0;
525 }
526 i->count -= wanted;
527 i->nr_segs -= bvec - i->bvec;
528 i->bvec = bvec;
529 i->iov_offset = skip;
530 return wanted;
531}
532
c35e0248
MW
533static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
534 size_t bytes, struct iov_iter *i)
535{
536 void *kaddr = kmap_atomic(page);
537 size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
538 kunmap_atomic(kaddr);
539 return wanted;
540}
541
542static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
543 size_t bytes, struct iov_iter *i)
544{
545 void *kaddr = kmap_atomic(page);
546 size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
547 kunmap_atomic(kaddr);
548 return wanted;
549}
550
/*
 * Zero up to @bytes of the pages of bio_vec-backed iterator @i,
 * advancing the iterator.  Returns the number of bytes zeroed (always
 * the clamped request — page-backed memory cannot fault).
 */
static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	/* first segment may start mid-bvec at iov_offset */
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		bytes -= copy;
	}
	/* advance the iterator past everything we consumed */
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}
587
62a8067a
AV
588size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
589 struct iov_iter *i)
590{
591 if (i->type & ITER_BVEC)
592 return copy_page_to_iter_bvec(page, offset, bytes, i);
593 else
594 return copy_page_to_iter_iovec(page, offset, bytes, i);
595}
596EXPORT_SYMBOL(copy_page_to_iter);
597
598size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
599 struct iov_iter *i)
600{
601 if (i->type & ITER_BVEC)
602 return copy_page_from_iter_bvec(page, offset, bytes, i);
603 else
604 return copy_page_from_iter_iovec(page, offset, bytes, i);
605}
606EXPORT_SYMBOL(copy_page_from_iter);
607
c35e0248
MW
608size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
609{
610 if (i->type & ITER_BVEC)
611 return copy_to_iter_bvec(addr, bytes, i);
612 else
613 return copy_to_iter_iovec(addr, bytes, i);
614}
615EXPORT_SYMBOL(copy_to_iter);
616
617size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
618{
619 if (i->type & ITER_BVEC)
620 return copy_from_iter_bvec(addr, bytes, i);
621 else
622 return copy_from_iter_iovec(addr, bytes, i);
623}
624EXPORT_SYMBOL(copy_from_iter);
625
626size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
627{
628 if (i->type & ITER_BVEC) {
629 return zero_bvec(bytes, i);
630 } else {
631 return zero_iovec(bytes, i);
632 }
633}
634EXPORT_SYMBOL(iov_iter_zero);
635
62a8067a
AV
/*
 * Copy @bytes from iterator @i into @page at @offset without sleeping
 * (page is mapped with kmap_atomic; user copies use the non-faulting
 * __copy_from_user_inatomic).  Does not advance the iterator.  Returns
 * the number of bytes copied, which may be short on fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	/* (p += len) - len yields the old p, then advances the cursor */
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
650
/*
 * Advance iterator @i by @size bytes without touching the data: the
 * step expressions are 0, so iterate_and_advance only updates count,
 * the segment pointer, nr_segs and iov_offset.
 */
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
656
657/*
658 * Return the count of just the current iov_iter segment.
659 */
660size_t iov_iter_single_seg_count(const struct iov_iter *i)
661{
662 if (i->nr_segs == 1)
663 return i->count;
664 else if (i->type & ITER_BVEC)
62a8067a 665 return min(i->count, i->bvec->bv_len - i->iov_offset);
ad0eab92
PM
666 else
667 return min(i->count, i->iov->iov_len - i->iov_offset);
62a8067a
AV
668}
669EXPORT_SYMBOL(iov_iter_single_seg_count);
670
/*
 * Return the OR of the addresses/offsets and lengths of every segment
 * touched by the remaining count: callers test low bits of the result
 * to check whether the whole iterator meets an alignment requirement.
 * Does not advance the iterator.
 */
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
686
/*
 * Pin up to @maxpages pages backing the first segment of @i (at most
 * @maxsize bytes) into @pages.  *@start receives the offset into the
 * first page.  Does not advance the iterator.  Returns the number of
 * bytes covered by the pinned pages, 0 for an empty iterator, or a
 * negative errno from get_user_pages_fast.  Note: the step expressions
 * return directly out of the enclosing function from inside the
 * iterate_all_kinds expansion, so only the first segment is handled.
 */
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		/* write access to the pages iff we are reading into them */
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
721
1b17f1f2
AV
722static struct page **get_pages_array(size_t n)
723{
724 struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
725 if (!p)
726 p = vmalloc(n * sizeof(struct page *));
727 return p;
728}
729
62a8067a
AV
/*
 * Like iov_iter_get_pages, but allocates the page-pointer array itself
 * (sized to the first segment) and hands it back through *@pages; the
 * caller frees it with kvfree().  Does not advance the iterator.
 * Returns the number of bytes covered, 0 for an empty iterator,
 * -ENOMEM if the array allocation fails, or a negative errno from
 * get_user_pages_fast.  Note: the step expressions return directly out
 * of the function from inside the iterate_all_kinds expansion, so only
 * the first segment is handled.
 */
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		/* write access to the pages iff we are reading into them */
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
773
/*
 * Return the number of pages spanned by the remaining count of @i,
 * capped at @maxpages.  Does not advance the iterator.  The early
 * returns fire from inside the iterate_all_kinds expansion as soon as
 * the cap is reached.
 */
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		/* pages touched by [p, p + iov_len), including partials */
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		/* each bio_vec covers at most one page */
		npages++;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);