iov_iter.c: convert iov_iter_get_pages() to iterate_all_kinds
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

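/*
 * Walk the iovec segments of @i, presenting each chunk to STEP as __v
 * (clamped to the remaining byte count @n, with @skip bytes already
 * consumed in the current segment).  STEP must evaluate to the number
 * of bytes it failed to process, so a short copy (a fault in userland,
 * say) stops the walk early.  On exit @n holds the number of bytes
 * actually processed.
 */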
#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

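/*
 * The same walk over a bio_vec array.  Page-to-page copies cannot
 * fault, so here STEP has no meaningful return value; it is discarded
 * and the walk always consumes all @n bytes.
 */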
#define iterate_bvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->bvec;                                  \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip);      \
        if (likely(__v.bv_len)) {                       \
                __v.bv_page = __p->bv_page;             \
                __v.bv_offset = __p->bv_offset + skip;  \
                (void)(STEP);                           \
                skip += __v.bv_len;                     \
                n -= __v.bv_len;                        \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.bv_len = min_t(size_t, n, __p->bv_len);     \
                if (unlikely(!__v.bv_len))              \
                        continue;                       \
                __v.bv_page = __p->bv_page;             \
                __v.bv_offset = __p->bv_offset;         \
                (void)(STEP);                           \
                skip = __v.bv_len;                      \
                n -= __v.bv_len;                        \
        }                                               \
        n = wanted;                                     \
}

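/*
 * Dispatch on the flavour of the iterator: bio_vec-backed iterators
 * are walked by iterate_bvec() with step B, iovec-backed ones by
 * iterate_iovec() with step I.  The iterator itself is not advanced.
 * A minimal sketch of a caller draining a kernel buffer p
 * (illustrative only; see iov_iter_copy_from_user_atomic() below for
 * a real user of this macro):
 *
 *      iterate_all_kinds(i, bytes, v,
 *              copy_to_user(v.iov_base, (p += v.iov_len) - v.iov_len,
 *                           v.iov_len),
 *              memcpy_to_page(v.bv_page, v.bv_offset,
 *                             (p += v.bv_len) - v.bv_len, v.bv_len)
 *      )
 *
 * The iovec step must evaluate to the number of bytes left uncopied,
 * which is exactly what copy_to_user() returns.
 */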
#define iterate_all_kinds(i, n, v, I, B) {                      \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                const struct bio_vec *bvec;                     \
                struct bio_vec v;                               \
                iterate_bvec(i, n, v, bvec, skip, (B))          \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
        }                                                       \
}

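/*
 * Like iterate_all_kinds(), but also advances the iterator past the
 * bytes processed: the segment pointer, nr_segs, count and iov_offset
 * are all updated to match.
 */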
#define iterate_and_advance(i, n, v, I, B) {                    \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                const struct bio_vec *bvec;                     \
                struct bio_vec v;                               \
                iterate_bvec(i, n, v, bvec, skip, (B))          \
                if (skip == bvec->bv_len) {                     \
                        bvec++;                                 \
                        skip = 0;                               \
                }                                               \
                i->nr_segs -= bvec - i->bvec;                   \
                i->bvec = bvec;                                 \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
                if (skip == iov->iov_len) {                     \
                        iov++;                                  \
                        skip = 0;                               \
                }                                               \
                i->nr_segs -= iov - i->iov;                     \
                i->iov = iov;                                   \
        }                                                       \
        i->count -= n;                                          \
        i->iov_offset = skip;                                   \
}

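/*
 * Copy up to @bytes between kernel memory and the userland segments
 * of @i, advancing the iterator.  These return the number of bytes
 * actually copied, which is short only if a segment faults.
 */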
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

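/*
 * Copy between a (possibly highmem) page and userland segments.  The
 * atomic-kmap fast path is tried first, after pre-faulting the user
 * buffer; if the copy faults anyway, fall back to a sleeping kmap()
 * and a plain uaccess copy.
 */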
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

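/* Clear up to @bytes of the userland segments of @i, advancing it. */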
static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __clear_user(buf, copy);
        copy -= left;
        skip += copy;
        bytes -= copy;

        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __clear_user(buf, copy);
                copy -= left;
                skip = copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

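/*
 * Initialize an iovec-backed iterator.  @direction is READ or WRITE;
 * when the caller runs under set_fs(KERNEL_DS) the "user" pointers
 * actually refer to kernel memory, so the iterator is also flagged
 * ITER_KVEC.
 */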
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

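/*
 * Pin the user pages backing the current iovec segment, up to @maxsize
 * bytes, into a freshly allocated array returned via *@pages; *@start
 * receives the offset into the first page.  Returns the byte length
 * covered or a negative errno.
 */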
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

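/* Bounce helpers for bio_vec pages, via short-lived atomic kmaps. */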
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

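/*
 * bio_vec counterparts of the iovec copy routines above.  These go
 * through kmap_atomic() and cannot fault, so they always consume the
 * full (clamped) length.
 */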
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;

        copy = min(bytes, bvec->bv_len - skip);

        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

        to += copy;
        skip += copy;
        bytes -= copy;

        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memzero_page(bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

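/*
 * bio_vec flavour of get_pages_alloc: take a reference on the page of
 * the current segment; a single bio_vec never spans more than one page.
 */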
static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

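/* Exported entry points: dispatch on the flavour of the iterator. */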
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_to_iter_bvec(addr, bytes, i);
        else
                return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_from_iter_bvec(addr, bytes, i);
        else
                return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC) {
                return zero_bvec(bytes, i);
        } else {
                return zero_iovec(bytes, i);
        }
}
EXPORT_SYMBOL(iov_iter_zero);

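/*
 * Atomic-context copy from the iterator into @page.  Must not sleep,
 * hence __copy_from_user_inatomic(); callers are expected to have
 * pre-faulted the user pages.  Does not advance the iterator.
 */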
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

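/*
 * OR together the addresses/offsets and lengths of every segment; the
 * result lets a caller test the worst-case alignment of the whole
 * iterator in one comparison (e.g. against direct-I/O block-size
 * restrictions).
 */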
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

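/*
 * Pin up to @maxpages user pages covering the start of the iterator
 * (at most @maxsize bytes) into the caller-supplied @pages array;
 * *@start receives the offset into the first page.  Returns the byte
 * length covered, 0 for an empty iterator, or a negative errno.  The
 * iterator is not advanced.
 */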
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

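/*
 * Number of pages spanned by the iterator's remaining data, capped
 * at @maxpages.
 */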
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);