#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
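
/*
 * iterate_iovec() walks the iovec array of the iterator, handing each chunk
 * to STEP as __v (a struct iovec clamped to the bytes remaining).  STEP is
 * expected to return the number of bytes it could NOT process (as
 * __copy_*_user() does), so the walk stops early on a fault.  On exit n
 * holds the number of bytes actually processed and skip the offset into the
 * last segment touched.
 */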
#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                     \
        size_t wanted = n;                               \
        __p = i->iov;                                    \
        __v.iov_len = min(n, __p->iov_len - skip);       \
        if (likely(__v.iov_len)) {                       \
                __v.iov_base = __p->iov_base + skip;     \
                left = (STEP);                           \
                __v.iov_len -= left;                     \
                skip += __v.iov_len;                     \
                n -= __v.iov_len;                        \
        } else {                                         \
                left = 0;                                \
        }                                                \
        while (unlikely(!left && n)) {                   \
                __p++;                                   \
                __v.iov_len = min(n, __p->iov_len);      \
                if (unlikely(!__v.iov_len))              \
                        continue;                        \
                __v.iov_base = __p->iov_base;            \
                left = (STEP);                           \
                __v.iov_len -= left;                     \
                skip = __v.iov_len;                      \
                n -= __v.iov_len;                        \
        }                                                \
        n = wanted - n;                                  \
}

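/*
 * iterate_bvec() is the bio_vec counterpart: STEP operates on page/offset/len
 * chunks through __v and cannot fault, so the walk always consumes the full
 * n bytes.
 */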
#define iterate_bvec(i, n, __v, __p, skip, STEP) {       \
        size_t wanted = n;                               \
        __p = i->bvec;                                   \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip);     \
        if (likely(__v.bv_len)) {                        \
                __v.bv_page = __p->bv_page;              \
                __v.bv_offset = __p->bv_offset + skip;   \
                (void)(STEP);                            \
                skip += __v.bv_len;                      \
                n -= __v.bv_len;                         \
        }                                                \
        while (unlikely(n)) {                            \
                __p++;                                   \
                __v.bv_len = min_t(size_t, n, __p->bv_len);     \
                if (unlikely(!__v.bv_len))               \
                        continue;                        \
                __v.bv_page = __p->bv_page;              \
                __v.bv_offset = __p->bv_offset;          \
                (void)(STEP);                            \
                skip = __v.bv_len;                       \
                n -= __v.bv_len;                         \
        }                                                \
        n = wanted;                                      \
}

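/*
 * iterate_all_kinds() dispatches on i->type: the B expression is used for
 * ITER_BVEC iterators, the I expression for user-space iovecs.  It does not
 * touch the iterator's position.
 */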
#define iterate_all_kinds(i, n, v, I, B) {               \
        size_t skip = i->iov_offset;                     \
        if (unlikely(i->type & ITER_BVEC)) {             \
                const struct bio_vec *bvec;              \
                struct bio_vec v;                        \
                iterate_bvec(i, n, v, bvec, skip, (B))   \
        } else {                                         \
                const struct iovec *iov;                 \
                struct iovec v;                          \
                iterate_iovec(i, n, v, iov, skip, (I))   \
        }                                                \
}

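/*
 * iterate_and_advance() does the same walk but also advances the iterator:
 * it updates i->iov/i->bvec, i->nr_segs, i->count and i->iov_offset to
 * reflect the bytes consumed.
 */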
#define iterate_and_advance(i, n, v, I, B) {             \
        size_t skip = i->iov_offset;                     \
        if (unlikely(i->type & ITER_BVEC)) {             \
                const struct bio_vec *bvec;              \
                struct bio_vec v;                        \
                iterate_bvec(i, n, v, bvec, skip, (B))   \
                if (skip == bvec->bv_len) {              \
                        bvec++;                          \
                        skip = 0;                        \
                }                                        \
                i->nr_segs -= bvec - i->bvec;            \
                i->bvec = bvec;                          \
        } else {                                         \
                const struct iovec *iov;                 \
                struct iovec v;                          \
                iterate_iovec(i, n, v, iov, skip, (I))   \
                if (skip == iov->iov_len) {              \
                        iov++;                           \
                        skip = 0;                        \
                }                                        \
                i->nr_segs -= iov - i->iov;              \
                i->iov = iov;                            \
        }                                                \
        i->count -= n;                                   \
        i->iov_offset = skip;                            \
}

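/*
 * The *_iovec() helpers below open-code the same segment walk as the
 * iterate_* macros: copy as much as possible, stop at the first fault and
 * report how many bytes were actually transferred.
 */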
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

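/*
 * copy_page_{to,from}_iter_iovec() try a kmap_atomic() fast path first:
 * the user buffer is prefaulted with fault_in_pages_*() and copied with the
 * _inatomic user-copy variants.  If that still faults, the page is remapped
 * with plain kmap() and the copy is retried with the sleeping
 * __copy_*_user().
 */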
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __clear_user(buf, copy);
        copy -= left;
        skip += copy;
        bytes -= copy;

        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __clear_user(buf, copy);
                copy -= left;
                skip = copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

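/*
 * Typical caller pattern (a sketch, not code from this file): the generic
 * buffered-write path prefaults the user pages before taking the page lock,
 * so that iov_iter_copy_from_user_atomic() is unlikely to fault while
 * atomic:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		break;	(bail out with -EFAULT)
 *	...lock and prepare the page...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */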
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

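/*
 * Pin the user pages backing the first iovec segment.  The page array is
 * allocated with kmalloc(), falling back to vmalloc() for large segments;
 * on success the number of usable bytes (starting at *start within the
 * first page) is returned.
 */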
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

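/* Small kmap_atomic() helpers for copying to/from and zeroing a page. */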
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;

        copy = min(bytes, bvec->bv_len - skip);

        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

        to += copy;
        skip += copy;
        bytes -= copy;

        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memzero_page(bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

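/*
 * Public entry points: each one simply dispatches to the iovec or bvec
 * implementation above based on i->type.
 */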
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_to_iter_bvec(addr, bytes, i);
        else
                return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_from_iter_bvec(addr, bytes, i);
        else
                return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC) {
                return zero_bvec(bytes, i);
        } else {
                return zero_iovec(bytes, i);
        }
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

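/*
 * Advance the iterator by size bytes without copying anything: the step
 * expressions passed to iterate_and_advance() are no-ops (0), so only the
 * position bookkeeping is done.
 */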
void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

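/*
 * Pin up to maxpages pages from the current segment into *pages and return
 * the number of bytes they cover, with *start set to the offset into the
 * first page.  For bvec iterators a single page reference is taken.
 */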
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        0;})
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        0;})
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);