drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

 bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
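
/*
 * Illustrative sketch (not part of this file): callers typically keep one
 * client per logical piece of work and must check for an ERR_PTR() return:
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */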

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
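
/*
 * Worked example of the packing above: with BITS_PER_LONG == 64,
 * DM_IO_MAX_REGIONS is 64, so 'struct io' is 64-byte aligned and the low
 * 6 bits of its address are zero.  Storing io == 0x...c0 with region == 5
 * yields bi_private == 0x...c5; masking with -64 (i.e. ~63) recovers the
 * pointer and masking with 63 recovers the region number.
 */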

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	int error;

	if (bio->bi_error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_error;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
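
/*
 * Each memory source below supplies a get_page/next_page pair, so
 * do_region() can walk any of them through the same two calls:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	...
 *	dp->next_page(dp);
 */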

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
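
/*
 * Note: the VMA and KMEM variants above differ only in the page lookup
 * (vmalloc_to_page() vs virt_to_page()); DM_IO_VMA additionally needs the
 * cache flush/invalidate handled in dp_init() and complete_io().
 */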

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
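
/*
 * Reference counting above: the caller (sync_io() or async_io()) initialises
 * io->count to 1, each bio submitted by do_region() takes another reference,
 * and the final dec_count() here drops the initial one.  This guarantees the
 * io cannot complete while bios are still being dispatched.
 */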

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
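
/*
 * Illustrative sketch (not part of this file): a synchronous 4KiB read of
 * one region into kernel memory.  Field names follow struct dm_io_request
 * and struct dm_io_region in <linux/dm-io.h>.
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,			// 512-byte sectors
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,		// page-aligned kernel buffer
 *		.notify.fn    = NULL,		// NULL => sync_io() path
 *		.client       = client,		// from dm_io_client_create()
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */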

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}