/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->raw_data_len += bio->bi_size;
		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
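
/*
 * Illustrative sketch, not part of the original file: building up a
 * request from two already-allocated bios with blk_rq_append_bio().
 * The first call preps the empty request; the second back-merges or
 * chains onto rq->biotail. The helper name is hypothetical.
 */
static inline int example_append_two_bios(struct request_queue *q,
					  struct request *rq,
					  struct bio *first,
					  struct bio *second)
{
	int ret;

	ret = blk_rq_append_bio(q, rq, first);	/* preps the empty rq */
	if (ret)
		return ret;
	/* merged onto the tail, or -EINVAL if it cannot be merged */
	return blk_rq_append_bio(q, rq, second);
}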

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) &&
	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @ubuf: the user buffer
 * @len: length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	/*
	 * __blk_rq_map_user() copies the buffers if starting address
	 * or length isn't aligned. As the copied buffer is always
	 * page aligned, we know that there's enough room for padding.
	 * Extend the last bio and update rq->data_len accordingly.
	 *
	 * On unmap, bio_uncopy_user() will use unmodified
	 * bio_map_data pointed to by bio->bi_private.
	 */
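	/*
	 * Worked example, not in the original: with a 512 byte DMA
	 * alignment (queue_dma_alignment() == 0x1ff) and len == 1030,
	 * pad_len == (0x1ff & ~1030) + 1 == 506, extending the transfer
	 * to 1536 bytes, the next alignment boundary.
	 */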
	if (len & queue_dma_alignment(q)) {
		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
		struct bio *bio = rq->biotail;

		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
		bio->bi_size += pad_len;
		rq->data_len += pad_len;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
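
/*
 * Illustrative usage sketch, not part of the original file: map a user
 * buffer into a REQ_TYPE_BLOCK_PC request, execute it, and unmap. Note
 * that the *original* rq->bio is saved across execution, as completion
 * may change rq->bio; blk_rq_unmap_user() needs the saved pointer.
 * Everything except the block layer calls is hypothetical, and the
 * SCSI command setup in rq->cmd[] is omitted.
 */
static inline int example_sg_io(struct request_queue *q,
				struct gendisk *disk,
				void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err, ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err) {
		blk_put_request(rq);
		return err;
	}

	bio = rq->bio;				/* save for unmap */
	err = blk_execute_rq(q, disk, rq, 0);	/* waits for completion */

	ret = blk_rq_unmap_user(bio);
	blk_put_request(rq);
	return ret ? ret : err;
}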

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does. If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
			       rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
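
/*
 * Illustrative sketch, not part of the original file: mapping a two
 * segment scatter/gather list of user buffers. Both addresses and
 * lengths are assumed to satisfy queue_dma_alignment(), since this
 * path rejects misaligned data rather than bouncing it. Assumes
 * <scsi/sg.h> for the struct sg_iovec definition; the helper name is
 * hypothetical.
 */
static inline int example_map_two_segments(struct request_queue *q,
					   struct request *rq,
					   void __user *buf0, size_t len0,
					   void __user *buf1, size_t len1)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = buf0;
	iov[0].iov_len = len0;
	iov[1].iov_base = buf1;
	iov[1].iov_len = len1;

	/* @len must match the total iovec payload or -EINVAL is returned */
	return blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1);
}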

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
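
/*
 * Illustrative sketch, not part of the original file: issue a
 * REQ_TYPE_BLOCK_PC request backed by a kernel buffer. Unlike the
 * user-space variants there is no unmap step; the mapped bio is torn
 * down when the request completes. Everything except the block layer
 * calls is hypothetical, and the rq->cmd[] setup is omitted.
 */
static inline int example_kern_io(struct request_queue *q,
				  struct gendisk *disk,
				  void *kbuf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}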