/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
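
/*
 * Example (illustrative sketch, hypothetical caller): building a request
 * from two bios.  The first call sets up the request via blk_rq_bio_prep();
 * the second either back-merges into the existing tail or chains a new bio
 * through rq->biotail:
 *
 *	if (blk_rq_append_bio(q, rq, bio1))
 *		goto fail;
 *	if (blk_rq_append_bio(q, rq, bio2))
 *		goto fail;
 */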

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, const struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
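
	/*
	 * Illustration of the choice above (values hypothetical): with a
	 * queue_dma_alignment() mask of 511, a user address of 0x1003
	 * yields 0x1003 & 511 == 3, so the copy path is taken; an aligned
	 * address such as 0x1200 yields 0 and permits zero-copy mapping.
	 */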

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_iter.bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
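
/*
 * Example (illustrative sketch): mapping a two-segment user buffer into a
 * passthrough request.  "q", "rq", "disk", "ubuf0" and "ubuf1" are
 * hypothetical, and error handling is abbreviated:
 *
 *	struct sg_iovec iov[2] = {
 *		{ .iov_base = ubuf0, .iov_len = 512 },
 *		{ .iov_base = ubuf1, .iov_len = 512 },
 *	};
 *	struct bio *bio;
 *
 *	if (blk_rq_map_user_iov(q, rq, NULL, iov, 2, 1024, GFP_KERNEL))
 *		goto out;
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_rq_unmap_user(bio);
 *
 * rq->bio is saved before execution because I/O completion may change it
 * (see blk_rq_unmap_user() below).
 */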

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)ubuf;
	iov.iov_len = len;

	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
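
/*
 * Example (illustrative, loosely patterned on sg_io() in
 * block/scsi_ioctl.c): the single-buffer map paired with unmap.  "q",
 * "rq", "disk", "ubuf" and "len" are hypothetical:
 *
 *	struct bio *bio;
 *
 *	if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL))
 *		goto out;
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 *
 * For a read through a bounce buffer, blk_rq_unmap_user() copies the data
 * back to user space; for a zero-copy mapping it releases the pinned pages.
 */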

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
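
	/*
	 * Illustration of the check above: a buffer that violates the
	 * queue's DMA alignment, or one living on the kernel stack (which
	 * cannot safely be handed to the device directly), sets do_copy
	 * and routes the data through bio_copy_kern() instead of mapping
	 * it in place with bio_map_kern().
	 */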
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
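
/*
 * Example (illustrative sketch): issuing a passthrough command with a
 * kernel buffer.  "q", "rq", "disk" and "len" are hypothetical:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		goto out;
 *	if (blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL))
 *		goto out_free;
 *	blk_execute_rq(q, disk, rq, 0);
 *
 * A stack buffer would also work, but object_is_on_stack() above would
 * force the slower copy path.
 */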