/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

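/*
 * Check whether appending @cur after @prv would leave a virtual-address
 * gap that the device's scatter/gather engine cannot express.  Worked
 * example (numbers made up purely for illustration): with a virt
 * boundary mask of 0xfff (a device that cannot merge across 4KiB
 * virtual boundaries), a previous iovec of
 * { .iov_base = (void *)0x10000, .iov_len = 0x800 } ends at 0x10800,
 * so "prev_end & 0xfff" is non-zero; the caller then falls back to a
 * copy (bounce) mapping instead of mapping the pages directly.
 */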
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

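/*
 * blk_rq_append_bio - append @bio to @rq.  The first bio simply becomes
 * the request's payload; any subsequent bio must pass the back-merge
 * check so the request stays within the queue's segment and size limits.
 */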
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(q, rq, bio);
	} else if (!ll_back_merge_fn(q, rq, bio)) {
		return -EINVAL;
	} else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

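/*
 * Undo the mapping for a single bio: directly mapped user pages are
 * released, while copied (bounced) data is written back to user space
 * as needed and the copy is freed.
 */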
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may lie outside
 *    the device's reachable address range. It's the caller's responsibility to
 *    make sure this happens. The original bio must be passed back in to
 *    blk_rq_unmap_user() for proper unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			unaligned = 1;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

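	/*
	 * Fall back to copying through a kernel bounce buffer if any
	 * segment was misaligned or gapped, if the byte count is not
	 * aligned to the queue's dma_pad_mask, or if the caller supplied
	 * its own pages in @map_data; otherwise map the user pages
	 * directly.
	 */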
	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
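	/*
	 * The request now references the bio (possibly a bounce clone),
	 * so take an extra reference to keep it alive until the caller
	 * unmaps it with blk_rq_unmap_user().
	 */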
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
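
/*
 * Example usage (illustrative sketch only, not part of this file's API):
 * an SG_IO-style ioctl handler might map a user-supplied iovec into a
 * REQ_TYPE_BLOCK_PC request roughly like this.  Request allocation,
 * command setup and submission are assumed to happen elsewhere; the
 * variable names below are made up for the example.
 *
 *	struct iov_iter i;
 *	struct iovec *iov = NULL;
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uiov, nr_segs, 0, &iov, &i);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *	kfree(iov);
 *	if (ret)
 *		return ret;
 *
 *	bio = rq->bio;		// remember the original bio for unmapping
 *	// ... submit rq and wait for completion ...
 *	ret = blk_rq_unmap_user(bio);
 */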

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, saved right after mapping, since I/O
 *    completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
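
/*
 * Example usage (illustrative sketch only): mapping a single flat user
 * buffer is the common case for passthrough ioctls.  The ubuf/len
 * variables below are assumptions for the example, and error handling
 * of the surrounding request lifetime is omitted.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 *	bio = rq->bio;		// keep the original bio for unmapping
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */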

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

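	/*
	 * Buffers that are not aligned for this queue, or that live on
	 * the kernel stack, cannot be mapped for DMA directly; copy them
	 * through a bounce buffer instead.
	 */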
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
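
/*
 * Example usage (illustrative sketch only): sending a passthrough
 * command with a kernel buffer, e.g. from a driver ioctl path.  The
 * 512-byte buffer is a made-up example; a real caller would fill in the
 * command bytes, check the kzalloc() result and inspect rq->errors
 * after execution.
 *
 *	void *buf = kzalloc(512, GFP_KERNEL);
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *
 *	ret = blk_rq_map_kern(q, rq, buf, 512, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, NULL, rq, 0);
 *
 *	blk_put_request(rq);
 *	kfree(buf);
 */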