block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
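
/*
 * Illustrative sketch, not part of the original file: one way a passthrough
 * ioctl path might use blk_rq_map_user_iov().  The helper name and the @biop
 * out-parameter are hypothetical; the caller is assumed to have built @iter
 * (for example with import_iovec()), to set up the passthrough command
 * itself, and to hand *biop to blk_rq_unmap_user() once the request has
 * completed (see the sketch after blk_rq_unmap_user() below).
 */
static __maybe_unused struct request *
map_user_iov_example(struct request_queue *q, const struct iov_iter *iter,
		     struct bio **biop)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* READ-style transfer */
	if (IS_ERR(rq))
		return rq;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/*
	 * Map the user segments for zero-copy I/O if alignment permits;
	 * otherwise blk_rq_map_user_iov() falls back to a kernel bounce copy.
	 */
	ret = blk_rq_map_user_iov(q, rq, NULL, iter, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	/*
	 * Remember the original bio now: I/O completion may change rq->bio,
	 * and blk_rq_unmap_user() must be handed this pointer later.
	 */
	*biop = rq->bio;
	return rq;
}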

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
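
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * blk_rq_map_user() and blk_rq_unmap_user() around a synchronously executed
 * passthrough request.  The helper name is hypothetical, and setup of the
 * passthrough command itself (rq->cmd, rq->cmd_len, sense buffer) as well as
 * inspection of rq->errors are elided.
 */
static __maybe_unused int
map_exec_unmap_example(struct request_queue *q, struct gendisk *disk,
		       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* Map (or copy) the single user buffer into the request. */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	/* Keep the original bio list: completion may advance rq->bio. */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	/* Still in process context: copy back / release the mapped pages. */
	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}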

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
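
/*
 * Illustrative sketch, not part of the original file: how a scsi_execute()
 * style caller might attach a kernel buffer to a passthrough request with
 * blk_rq_map_kern().  The helper name is hypothetical and the actual command
 * bytes are elided; note that no explicit unmap step is required afterwards,
 * the bio's completion handler performs any copy-back for the bounce case.
 */
static __maybe_unused int
map_kern_example(struct request_queue *q, struct gendisk *disk,
		 void *buf, unsigned int buflen)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/*
	 * Map @buf directly if it is properly aligned and not on the stack;
	 * otherwise blk_rq_map_kern() copies it through a bounce buffer.
	 */
	ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
	if (ret)
		goto out_put;

	blk_execute_rq(q, disk, rq, 0);
out_put:
	blk_put_request(rq);
	return ret;
}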