// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd;

        if (data->nr_segs > UIO_MAXIOV)
                return NULL;

        bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
        if (!bmd)
                return NULL;
        memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
        bmd->iter.iov = bmd->iov;
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret = copy_page_from_iter(bvec->bv_page,
                                bvec->bv_offset, bvec->bv_len, iter);

                if (!iov_iter_count(iter))
                        break;
                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}
/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret = copy_page_to_iter(bvec->bv_page,
                                bvec->bv_offset, bvec->bv_len, &iter);

                if (!iov_iter_count(&iter))
                        break;
                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
static struct bio *bio_copy_user_iov(struct request_queue *q,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        bmd = bio_alloc_map_data(iter, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);
        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        if (nr_pages > BIO_MAX_PAGES)
                nr_pages = BIO_MAX_PAGES;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        nr_pages = 1 << map_data->page_order;
        i = map_data->offset / PAGE_SIZE;
        unsigned int bytes = PAGE_SIZE;
        if (i == map_data->nr_entries * nr_pages) {
        page = map_data->pages[i / nr_pages];
        page += (i % nr_pages);
        page = alloc_page(q->bounce_gfp | gfp_mask);
        if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
        map_data->offset += bio->bi_iter.bi_size;
        if ((iov_iter_rw(iter) == WRITE &&
             (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
        } else {
                if (bmd->is_our_pages)
                        zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }

        bio->bi_private = bmd;
        if (map_data && map_data->null_mapped)
                bio_set_flag(bio, BIO_NULL_MAPPED);
        return bio;
/**
 * bio_map_user_iov - map user iovec into bio
 * @q: the struct request_queue for the bio
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_user_iov(struct request_queue *q,
                struct iov_iter *iter, gfp_t gfp_mask)
{
        unsigned int max_sectors = queue_max_hw_sectors(q);

        if (!iov_iter_count(iter))
                return ERR_PTR(-EINVAL);

        bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
        if (!bio)
                return ERR_PTR(-ENOMEM);
        while (iov_iter_count(iter)) {
                struct page **pages;
                ssize_t bytes;
                size_t offs, added = 0;
                int npages;

                bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
                if (unlikely(bytes <= 0)) {
                        ret = bytes ? bytes : -EFAULT;
                        goto out_unmap;
                }

                npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
                if (unlikely(offs & queue_dma_alignment(q))) {
                        ret = -EINVAL;
                        j = 0;
                } else {
                        for (j = 0; j < npages; j++) {
                                struct page *page = pages[j];
                                unsigned int n = PAGE_SIZE - offs;
                                bool same_page = false;

                                if (!bio_add_hw_page(q, bio, page, n, offs,
                                                max_sectors, &same_page)) {
                        iov_iter_advance(iter, added);
                }
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < npages)
                        put_page(pages[j++]);
                kvfree(pages);
                /* couldn't stuff something into bio? */
                if (bytes)
                        break;
        }

        bio_set_flag(bio, BIO_USER_MAPPED);
        /*
         * subtle -- if bio_map_user_iov() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);
        return bio;

 out_unmap:
        bio_release_pages(bio, false);
        bio_put(bio);
        return ERR_PTR(ret);
}
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
        bio_release_pages(bio, bio_data_dir(bio) == READ);
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
        if (bio->bi_private && !op_is_write(bio_op(bio))) {
                unsigned long i, len = 0;

                for (i = 0; i < bio->bi_vcnt; i++)
                        len += bio->bi_io_vec[i].bv_len;
                invalidate_kernel_vmap_range(bio->bi_private, len);
        }
#endif
}
static void bio_map_kern_endio(struct bio *bio)
{
        bio_invalidate_vmalloc_pages(bio);
        bio_put(bio);
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        bool is_vmalloc = is_vmalloc_addr(data);
        struct page *page;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        if (is_vmalloc) {
                flush_kernel_vmap_range(data, len);
                bio->bi_private = data;
        }
        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (!is_vmalloc)
                        page = virt_to_page(data);
                else
                        page = vmalloc_to_page(data);
                if (bio_add_pc_page(q, bio, page, bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);
                        return ERR_PTR(-EINVAL);
                }
        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
        bio_free_pages(bio);
        bio_put(bio);
}
static void bio_copy_kern_endio_read(struct bio *bio)
{
        char *p = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
                p += bvec->bv_len;
        }

        bio_copy_kern_endio(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask, int reading)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;

        /* Overflow, abort */
        if (end < start)
                return ERR_PTR(-EINVAL);

        nr_pages = end - start;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);
                unsigned int bytes = PAGE_SIZE;

                page = alloc_page(q->bounce_gfp | gfp_mask);
                if (!page)
                        goto cleanup;

                if (!reading)
                        memcpy(page_address(page), p, bytes);

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;
        if (reading) {
                bio->bi_end_io = bio_copy_kern_endio_read;
                bio->bi_private = data;
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
        }

        return bio;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
        return ERR_PTR(-ENOMEM);
}
/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
        struct bio *orig_bio = *bio;
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int nr_segs = 0;

        blk_queue_bounce(rq->q, bio);

        bio_for_each_bvec(bv, *bio, iter)
                nr_segs++;

        if (!rq->bio) {
                blk_rq_bio_prep(rq, *bio, nr_segs);
        } else {
                if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
                        if (orig_bio != *bio) {
                                bio_put(*bio);
                                *bio = orig_bio;
                        }
                        return -EINVAL;
                }

                rq->biotail->bi_next = *bio;
                rq->biotail = *bio;
                rq->__data_len += (*bio)->bi_iter.bi_size;
                bio_crypt_free_ctx(*bio);
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
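/*
 * Caller sketch (not part of blk-map.c): blk_rq_append_bio() takes a
 * struct bio ** because blk_queue_bounce() may replace the caller's bio
 * with a bounce bio; on failure the original pointer is restored, so a
 * caller that allocated the bio itself still owns exactly one reference
 * to drop.  The helper name below is hypothetical.
 */
static int example_append_own_bio(struct request *rq, struct bio *bio)
{
        struct bio *orig_bio = bio;
        int ret;

        ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
                /* *bio was restored to orig_bio; drop our reference */
                bio_put(orig_bio);
                return ret;
        }
        return 0;
}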
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}
static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);
        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);
        orig_bio = bio;

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
                __blk_rq_unmap_user(orig_bio);
                return ret;
        }
        bio_get(bio);

        return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;

        if (!iter_is_iovec(iter))
                goto fail;

        if (map_data)
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        return 0;

unmap_rq:
        blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
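/*
 * Caller sketch (assumption, not part of blk-map.c): how an ioctl-style
 * path might feed a user iovec array into a passthrough request, in the
 * spirit of sg_io().  example_map_uvec() and its arguments are
 * hypothetical; only the import_iovec()/blk_rq_map_user_iov() pairing is
 * the point.
 */
static int example_map_uvec(struct request_queue *q, struct request *rq,
                            const struct iovec __user *uvec, int nr_segs)
{
        struct iovec *iov = NULL;
        struct iov_iter i;
        int ret;

        ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
        if (ret < 0)
                return ret;

        ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
        kfree(iov);             /* import_iovec() may have allocated it */
        return ret;
}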
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
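/*
 * Caller sketch (assumption): the classic map/execute/unmap sequence for a
 * driver-private command.  rq->bio is saved before the request runs,
 * because completion may change rq->bio; the saved pointer is what
 * blk_rq_unmap_user() documents as its argument.  Device, opcode and the
 * helper name are hypothetical.
 */
static int example_user_io(struct request_queue *q, void __user *ubuf,
                           unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out_put;
        bio = rq->bio;

        blk_execute_rq(q, NULL, rq, 0);

        ret = blk_rq_unmap_user(bio);
out_put:
        blk_put_request(rq);
        return ret;
}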
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    bios.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        struct bio *bio, *orig_bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        orig_bio = bio;
        ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(orig_bio);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
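/*
 * Caller sketch (assumption): mapping a kmalloc'ed buffer for a
 * driver-private write, in the style of scsi_execute().  If the buffer is
 * misaligned or lives on the stack, blk_rq_map_kern() falls back to the
 * bio_copy_kern() bounce path above.  Names are hypothetical.
 */
static int example_kernel_io(struct request_queue *q, void *kbuf,
                             unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!ret)
                blk_execute_rq(q, NULL, rq, 0);

        blk_put_request(rq);
        return ret;
}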