dm: remove check that prevents mapping empty bios
[linux-2.6-block.git] / drivers / md / dm.c
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, clone;
        int error;
        union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct request *rq;
};
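
/*
 * Relationship between the bio-based structures above: each bio that
 * enters dm_request() gets one struct dm_io, and every clone handed to
 * a target gets a struct dm_target_io pointing back at that dm_io.
 * io_count counts the clones still in flight; dec_pending() drops it
 * and completes the original bio once it reaches zero.
 */
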
union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
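
/*
 * DMF_QUEUE_IO_TO_THREAD makes dm_request() hand new bios to the
 * per-device workqueue instead of mapping them inline, while
 * DMF_BLOCK_IO_FOR_SUSPEND additionally stops dm_wq_work() from
 * draining the deferred list, so the device can quiesce (see
 * dm_suspend() below).
 */
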
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;
        rwlock_t map_lock;
        atomic_t holders;
        atomic_t open_count;

        unsigned long flags;

        struct request_queue *queue;
        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * An error from the barrier request currently being processed.
         */
        int barrier_error;

        /*
         * Processing queue (flush/barriers)
         */
        struct workqueue_struct *wq;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        struct bio_set *bs;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support requires holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* sysfs handle */
        struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        if (!_major)
                _major = r;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}

static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_kcopyd_init,
        dm_interface_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

      bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

286/*
287 * Block device functions
288 */
fe5f9f2c 289static int dm_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4
LT
290{
291 struct mapped_device *md;
292
fba9f90e
JM
293 spin_lock(&_minor_lock);
294
fe5f9f2c 295 md = bdev->bd_disk->private_data;
fba9f90e
JM
296 if (!md)
297 goto out;
298
5c6bd75d
AK
299 if (test_bit(DMF_FREEING, &md->flags) ||
300 test_bit(DMF_DELETING, &md->flags)) {
fba9f90e
JM
301 md = NULL;
302 goto out;
303 }
304
1da177e4 305 dm_get(md);
5c6bd75d 306 atomic_inc(&md->open_count);
fba9f90e
JM
307
308out:
309 spin_unlock(&_minor_lock);
310
311 return md ? 0 : -ENXIO;
1da177e4
LT
312}
313
fe5f9f2c 314static int dm_blk_close(struct gendisk *disk, fmode_t mode)
1da177e4 315{
fe5f9f2c 316 struct mapped_device *md = disk->private_data;
5c6bd75d 317 atomic_dec(&md->open_count);
1da177e4
LT
318 dm_put(md);
319 return 0;
320}
321
5c6bd75d
AK
322int dm_open_count(struct mapped_device *md)
323{
324 return atomic_read(&md->open_count);
325}
326
327/*
328 * Guarantees nothing is using the device before it's deleted.
329 */
330int dm_lock_for_deletion(struct mapped_device *md)
331{
332 int r = 0;
333
334 spin_lock(&_minor_lock);
335
336 if (dm_open_count(md))
337 r = -EBUSY;
338 else
339 set_bit(DMF_DELETING, &md->flags);
340
341 spin_unlock(&_minor_lock);
342
343 return r;
344}
345
3ac51e74
DW
346static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
347{
348 struct mapped_device *md = bdev->bd_disk->private_data;
349
350 return dm_get_geometry(md, geo);
351}
352
fe5f9f2c 353static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
aa129a22
MB
354 unsigned int cmd, unsigned long arg)
355{
fe5f9f2c
AV
356 struct mapped_device *md = bdev->bd_disk->private_data;
357 struct dm_table *map = dm_get_table(md);
aa129a22
MB
358 struct dm_target *tgt;
359 int r = -ENOTTY;
360
aa129a22
MB
361 if (!map || !dm_table_get_size(map))
362 goto out;
363
364 /* We only support devices that have a single target */
365 if (dm_table_get_num_targets(map) != 1)
366 goto out;
367
368 tgt = dm_table_get_target(map, 0);
369
370 if (dm_suspended(md)) {
371 r = -EAGAIN;
372 goto out;
373 }
374
375 if (tgt->type->ioctl)
647b3d00 376 r = tgt->type->ioctl(tgt, cmd, arg);
aa129a22
MB
377
378out:
379 dm_table_put(map);
380
aa129a22
MB
381 return r;
382}
383
028867ac 384static struct dm_io *alloc_io(struct mapped_device *md)
1da177e4
LT
385{
386 return mempool_alloc(md->io_pool, GFP_NOIO);
387}
388
028867ac 389static void free_io(struct mapped_device *md, struct dm_io *io)
1da177e4
LT
390{
391 mempool_free(io, md->io_pool);
392}
393
028867ac 394static struct dm_target_io *alloc_tio(struct mapped_device *md)
1da177e4
LT
395{
396 return mempool_alloc(md->tio_pool, GFP_NOIO);
397}
398
028867ac 399static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
1da177e4
LT
400{
401 mempool_free(tio, md->tio_pool);
402}
403
3eaf840e
JNN
404static void start_io_acct(struct dm_io *io)
405{
406 struct mapped_device *md = io->md;
c9959059 407 int cpu;
3eaf840e
JNN
408
409 io->start_time = jiffies;
410
074a7aca
TH
411 cpu = part_stat_lock();
412 part_round_stats(cpu, &dm_disk(md)->part0);
413 part_stat_unlock();
414 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
3eaf840e
JNN
415}
416
d221d2e7 417static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
418{
419 struct mapped_device *md = io->md;
420 struct bio *bio = io->bio;
421 unsigned long duration = jiffies - io->start_time;
c9959059 422 int pending, cpu;
3eaf840e
JNN
423 int rw = bio_data_dir(bio);
424
074a7aca
TH
425 cpu = part_stat_lock();
426 part_round_stats(cpu, &dm_disk(md)->part0);
427 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
428 part_stat_unlock();
3eaf840e 429
af7e466a
MP
430 /*
431 * After this is decremented the bio must not be touched if it is
432 * a barrier.
433 */
074a7aca
TH
434 dm_disk(md)->part0.in_flight = pending =
435 atomic_dec_return(&md->pending);
3eaf840e 436
d221d2e7
MP
437 /* nudge anyone waiting on suspend queue */
438 if (!pending)
439 wake_up(&md->wait);
3eaf840e
JNN
440}
441
1da177e4
LT
442/*
443 * Add the bio to the list of deferred io.
444 */
92c63902 445static void queue_io(struct mapped_device *md, struct bio *bio)
1da177e4 446{
2ca3310e 447 down_write(&md->io_lock);
1da177e4 448
022c2611 449 spin_lock_irq(&md->deferred_lock);
1da177e4 450 bio_list_add(&md->deferred, bio);
022c2611 451 spin_unlock_irq(&md->deferred_lock);
1da177e4 452
92c63902
MP
453 if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
454 queue_work(md->wq, &md->work);
455
2ca3310e 456 up_write(&md->io_lock);
1da177e4
LT
457}
458
459/*
460 * Everyone (including functions in this file) should use this
461 * function to access the md->map field, and make sure they call
462 * dm_table_put() when finished.
463 */
464struct dm_table *dm_get_table(struct mapped_device *md)
465{
466 struct dm_table *t;
467
468 read_lock(&md->map_lock);
469 t = md->map;
470 if (t)
471 dm_table_get(t);
472 read_unlock(&md->map_lock);
473
474 return t;
475}
476
3ac51e74
DW
477/*
478 * Get the geometry associated with a dm device
479 */
480int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
481{
482 *geo = md->geometry;
483
484 return 0;
485}
486
487/*
488 * Set the geometry of a device.
489 */
490int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
491{
492 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
493
494 if (geo->start > sz) {
495 DMWARN("Start sector is beyond the geometry limits.");
496 return -EINVAL;
497 }
498
499 md->geometry = *geo;
500
501 return 0;
502}
503
1da177e4
LT
504/*-----------------------------------------------------------------
505 * CRUD START:
506 * A more elegant solution is in the works that uses the queue
507 * merge fn; unfortunately there are a couple of changes to
508 * the block layer that I want to make for this. So in the
509 * interests of getting something for people to use I give
510 * you this clearly demarcated crap.
511 *---------------------------------------------------------------*/
512
2e93ccc1
KU
513static int __noflush_suspending(struct mapped_device *md)
514{
515 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
516}
517
1da177e4
LT
518/*
519 * Decrements the number of outstanding ios that a bio has been
520 * cloned into, completing the original io if necessary.
521 */
858119e1 522static void dec_pending(struct dm_io *io, int error)
1da177e4 523{
2e93ccc1 524 unsigned long flags;
b35f8caa
MB
525 int io_error;
526 struct bio *bio;
527 struct mapped_device *md = io->md;
2e93ccc1
KU
528
529 /* Push-back supersedes any I/O errors */
b35f8caa 530 if (error && !(io->error > 0 && __noflush_suspending(md)))
1da177e4
LT
531 io->error = error;
532
533 if (atomic_dec_and_test(&io->io_count)) {
2e93ccc1
KU
534 if (io->error == DM_ENDIO_REQUEUE) {
535 /*
536 * Target requested pushing back the I/O.
2e93ccc1 537 */
022c2611 538 spin_lock_irqsave(&md->deferred_lock, flags);
2761e95f
MP
539 if (__noflush_suspending(md)) {
540 if (!bio_barrier(io->bio))
541 bio_list_add_head(&md->deferred,
542 io->bio);
543 } else
2e93ccc1
KU
544 /* noflush suspend was interrupted. */
545 io->error = -EIO;
022c2611 546 spin_unlock_irqrestore(&md->deferred_lock, flags);
2e93ccc1
KU
547 }
548
b35f8caa
MB
549 io_error = io->error;
550 bio = io->bio;
2e93ccc1 551
af7e466a
MP
552 if (bio_barrier(bio)) {
553 /*
554 * There can be just one barrier request so we use
555 * a per-device variable for error reporting.
556 * Note that you can't touch the bio after end_io_acct
557 */
fdb9572b 558 if (!md->barrier_error && io_error != -EOPNOTSUPP)
5aa2781d 559 md->barrier_error = io_error;
af7e466a
MP
560 end_io_acct(io);
561 } else {
562 end_io_acct(io);
b35f8caa 563
af7e466a
MP
564 if (io_error != DM_ENDIO_REQUEUE) {
565 trace_block_bio_complete(md->queue, bio);
2056a782 566
af7e466a
MP
567 bio_endio(bio, io_error);
568 }
b35f8caa 569 }
af7e466a
MP
570
571 free_io(md, io);
1da177e4
LT
572 }
573}
574
6712ecf8 575static void clone_endio(struct bio *bio, int error)
1da177e4
LT
576{
577 int r = 0;
028867ac 578 struct dm_target_io *tio = bio->bi_private;
b35f8caa 579 struct dm_io *io = tio->io;
9faf400f 580 struct mapped_device *md = tio->io->md;
1da177e4
LT
581 dm_endio_fn endio = tio->ti->type->end_io;
582
1da177e4
LT
583 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
584 error = -EIO;
585
586 if (endio) {
587 r = endio(tio->ti, bio, error, &tio->info);
2e93ccc1
KU
588 if (r < 0 || r == DM_ENDIO_REQUEUE)
589 /*
590 * error and requeue request are handled
591 * in dec_pending().
592 */
1da177e4 593 error = r;
45cbcd79
KU
594 else if (r == DM_ENDIO_INCOMPLETE)
595 /* The target will handle the io */
6712ecf8 596 return;
45cbcd79
KU
597 else if (r) {
598 DMWARN("unimplemented target endio return value: %d", r);
599 BUG();
600 }
1da177e4
LT
601 }
602
9faf400f
SB
603 /*
604 * Store md for cleanup instead of tio which is about to get freed.
605 */
606 bio->bi_private = md->bs;
607
9faf400f 608 free_tio(md, tio);
b35f8caa
MB
609 bio_put(bio);
610 dec_pending(io, error);
1da177e4
LT
611}
612
613static sector_t max_io_len(struct mapped_device *md,
614 sector_t sector, struct dm_target *ti)
615{
616 sector_t offset = sector - ti->begin;
617 sector_t len = ti->len - offset;
618
619 /*
620 * Does the target need to split even further ?
621 */
622 if (ti->split_io) {
623 sector_t boundary;
624 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
625 - offset;
626 if (len > boundary)
627 len = boundary;
628 }
629
630 return len;
631}
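
/*
 * Worked example of the split_io rounding above (the mask arithmetic
 * assumes ti->split_io is a power of two): with ti->split_io = 8 and
 * offset = 3, boundary becomes ((3 + 8) & ~7) - 3 = 8 - 3 = 5, i.e.
 * len is capped so the clone ends at the next 8-sector boundary
 * inside the target.
 */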
632
633static void __map_bio(struct dm_target *ti, struct bio *clone,
028867ac 634 struct dm_target_io *tio)
1da177e4
LT
635{
636 int r;
2056a782 637 sector_t sector;
9faf400f 638 struct mapped_device *md;
1da177e4 639
1da177e4
LT
640 clone->bi_end_io = clone_endio;
641 clone->bi_private = tio;
642
643 /*
644 * Map the clone. If r == 0 we don't need to do
645 * anything, the target has assumed ownership of
646 * this io.
647 */
648 atomic_inc(&tio->io->io_count);
2056a782 649 sector = clone->bi_sector;
1da177e4 650 r = ti->type->map(ti, clone, &tio->info);
45cbcd79 651 if (r == DM_MAPIO_REMAPPED) {
1da177e4 652 /* the bio has been remapped so dispatch it */
2056a782 653
5f3ea37c 654 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
22a7c31a 655 tio->io->bio->bi_bdev->bd_dev, sector);
2056a782 656
1da177e4 657 generic_make_request(clone);
2e93ccc1
KU
658 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
659 /* error the io and bail out, or requeue it if needed */
9faf400f
SB
660 md = tio->io->md;
661 dec_pending(tio->io, r);
662 /*
663 * Store bio_set for cleanup.
664 */
665 clone->bi_private = md->bs;
1da177e4 666 bio_put(clone);
9faf400f 667 free_tio(md, tio);
45cbcd79
KU
668 } else if (r) {
669 DMWARN("unimplemented target map return value: %d", r);
670 BUG();
1da177e4
LT
671 }
672}
673
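/*
 * For reference, a minimal sketch of the ->map method that __map_bio()
 * calls above (the names here are illustrative, loosely modelled on a
 * linear remapping target; "struct example_ctx" is hypothetical).
 * Returning DM_MAPIO_REMAPPED tells __map_bio() to dispatch the clone
 * itself:
 *
 *      static int example_map(struct dm_target *ti, struct bio *bio,
 *                             union map_info *map_context)
 *      {
 *              struct example_ctx *ec = ti->private;
 *
 *              bio->bi_bdev = ec->dev->bdev;
 *              bio->bi_sector = ec->start + (bio->bi_sector - ti->begin);
 *              return DM_MAPIO_REMAPPED;
 *      }
 */
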
674struct clone_info {
675 struct mapped_device *md;
676 struct dm_table *map;
677 struct bio *bio;
678 struct dm_io *io;
679 sector_t sector;
680 sector_t sector_count;
681 unsigned short idx;
682};
683
3676347a
PO
684static void dm_bio_destructor(struct bio *bio)
685{
9faf400f
SB
686 struct bio_set *bs = bio->bi_private;
687
688 bio_free(bio, bs);
3676347a
PO
689}
690
1da177e4
LT
691/*
692 * Creates a little bio that just does part of a bvec.
693 */
694static struct bio *split_bvec(struct bio *bio, sector_t sector,
695 unsigned short idx, unsigned int offset,
9faf400f 696 unsigned int len, struct bio_set *bs)
1da177e4
LT
697{
698 struct bio *clone;
699 struct bio_vec *bv = bio->bi_io_vec + idx;
700
9faf400f 701 clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
3676347a 702 clone->bi_destructor = dm_bio_destructor;
1da177e4
LT
703 *clone->bi_io_vec = *bv;
704
705 clone->bi_sector = sector;
706 clone->bi_bdev = bio->bi_bdev;
af7e466a 707 clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
1da177e4
LT
708 clone->bi_vcnt = 1;
709 clone->bi_size = to_bytes(len);
710 clone->bi_io_vec->bv_offset = offset;
711 clone->bi_io_vec->bv_len = clone->bi_size;
f3e1d26e 712 clone->bi_flags |= 1 << BIO_CLONED;
1da177e4 713
9c47008d
MP
714 if (bio_integrity(bio)) {
715 bio_integrity_clone(clone, bio, GFP_NOIO);
716 bio_integrity_trim(clone,
717 bio_sector_offset(bio, idx, offset), len);
718 }
719
1da177e4
LT
720 return clone;
721}
722
723/*
724 * Creates a bio that consists of a range of complete bvecs.
725 */
726static struct bio *clone_bio(struct bio *bio, sector_t sector,
727 unsigned short idx, unsigned short bv_count,
9faf400f 728 unsigned int len, struct bio_set *bs)
1da177e4
LT
729{
730 struct bio *clone;
731
9faf400f
SB
732 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
733 __bio_clone(clone, bio);
af7e466a 734 clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
9faf400f 735 clone->bi_destructor = dm_bio_destructor;
1da177e4
LT
736 clone->bi_sector = sector;
737 clone->bi_idx = idx;
738 clone->bi_vcnt = idx + bv_count;
739 clone->bi_size = to_bytes(len);
740 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
741
9c47008d
MP
742 if (bio_integrity(bio)) {
743 bio_integrity_clone(clone, bio, GFP_NOIO);
744
745 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
746 bio_integrity_trim(clone,
747 bio_sector_offset(bio, idx, 0), len);
748 }
749
1da177e4
LT
750 return clone;
751}
752
512875bd 753static int __clone_and_map(struct clone_info *ci)
1da177e4
LT
754{
755 struct bio *clone, *bio = ci->bio;
512875bd
JN
756 struct dm_target *ti;
757 sector_t len = 0, max;
028867ac 758 struct dm_target_io *tio;
1da177e4 759
512875bd
JN
760 ti = dm_table_find_target(ci->map, ci->sector);
761 if (!dm_target_is_valid(ti))
762 return -EIO;
763
764 max = max_io_len(ci->md, ci->sector, ti);
765
1da177e4
LT
766 /*
767 * Allocate a target io object.
768 */
769 tio = alloc_tio(ci->md);
770 tio->io = ci->io;
771 tio->ti = ti;
772 memset(&tio->info, 0, sizeof(tio->info));
773
774 if (ci->sector_count <= max) {
775 /*
776 * Optimise for the simple case where we can do all of
777 * the remaining io with a single clone.
778 */
779 clone = clone_bio(bio, ci->sector, ci->idx,
9faf400f
SB
780 bio->bi_vcnt - ci->idx, ci->sector_count,
781 ci->md->bs);
1da177e4
LT
782 __map_bio(ti, clone, tio);
783 ci->sector_count = 0;
784
785 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
786 /*
787 * There are some bvecs that don't span targets.
788 * Do as many of these as possible.
789 */
790 int i;
791 sector_t remaining = max;
792 sector_t bv_len;
793
794 for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
795 bv_len = to_sector(bio->bi_io_vec[i].bv_len);
796
797 if (bv_len > remaining)
798 break;
799
800 remaining -= bv_len;
801 len += bv_len;
802 }
803
9faf400f
SB
804 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
805 ci->md->bs);
1da177e4
LT
806 __map_bio(ti, clone, tio);
807
808 ci->sector += len;
809 ci->sector_count -= len;
810 ci->idx = i;
811
812 } else {
813 /*
d2044a94 814 * Handle a bvec that must be split between two or more targets.
1da177e4
LT
815 */
816 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
d2044a94
AK
817 sector_t remaining = to_sector(bv->bv_len);
818 unsigned int offset = 0;
1da177e4 819
d2044a94
AK
820 do {
821 if (offset) {
822 ti = dm_table_find_target(ci->map, ci->sector);
512875bd
JN
823 if (!dm_target_is_valid(ti))
824 return -EIO;
825
d2044a94 826 max = max_io_len(ci->md, ci->sector, ti);
1da177e4 827
d2044a94
AK
828 tio = alloc_tio(ci->md);
829 tio->io = ci->io;
830 tio->ti = ti;
831 memset(&tio->info, 0, sizeof(tio->info));
832 }
833
834 len = min(remaining, max);
835
836 clone = split_bvec(bio, ci->sector, ci->idx,
9faf400f
SB
837 bv->bv_offset + offset, len,
838 ci->md->bs);
d2044a94
AK
839
840 __map_bio(ti, clone, tio);
841
842 ci->sector += len;
843 ci->sector_count -= len;
844 offset += to_bytes(len);
845 } while (remaining -= len);
1da177e4 846
1da177e4
LT
847 ci->idx++;
848 }
512875bd
JN
849
850 return 0;
1da177e4
LT
851}
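
/*
 * Summary of the three cases above: (1) the rest of the bio fits in
 * the current target, so it is cloned in one go; (2) it does not fit,
 * but at least one whole bvec does, so as many complete bvecs as
 * possible are cloned; (3) not even the current bvec fits, so
 * split_bvec() carves that bvec up across two or more targets.  The
 * caller loops until sector_count reaches zero, so a bio that crosses
 * a target boundary simply comes back here once per target.
 */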
852
853/*
8a53c28d 854 * Split the bio into several clones and submit them to the targets.
1da177e4 855 */
f0b9a450 856static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1da177e4
LT
857{
858 struct clone_info ci;
512875bd 859 int error = 0;
1da177e4
LT
860
861 ci.map = dm_get_table(md);
f0b9a450 862 if (unlikely(!ci.map)) {
af7e466a
MP
863 if (!bio_barrier(bio))
864 bio_io_error(bio);
865 else
5aa2781d
MP
866 if (!md->barrier_error)
867 md->barrier_error = -EIO;
f0b9a450
MP
868 return;
869 }
692d0eb9 870
1da177e4
LT
871 ci.md = md;
872 ci.bio = bio;
873 ci.io = alloc_io(md);
874 ci.io->error = 0;
875 atomic_set(&ci.io->io_count, 1);
876 ci.io->bio = bio;
877 ci.io->md = md;
878 ci.sector = bio->bi_sector;
879 ci.sector_count = bio_sectors(bio);
880 ci.idx = bio->bi_idx;
881
3eaf840e 882 start_io_acct(ci.io);
512875bd
JN
883 while (ci.sector_count && !error)
884 error = __clone_and_map(&ci);
1da177e4
LT
885
886 /* drop the extra reference count */
512875bd 887 dec_pending(ci.io, error);
1da177e4
LT
888 dm_table_put(ci.map);
889}
890/*-----------------------------------------------------------------
891 * CRUD END
892 *---------------------------------------------------------------*/
893
f6fccb12
MB
894static int dm_merge_bvec(struct request_queue *q,
895 struct bvec_merge_data *bvm,
896 struct bio_vec *biovec)
897{
898 struct mapped_device *md = q->queuedata;
899 struct dm_table *map = dm_get_table(md);
900 struct dm_target *ti;
901 sector_t max_sectors;
5037108a 902 int max_size = 0;
f6fccb12
MB
903
904 if (unlikely(!map))
5037108a 905 goto out;
f6fccb12
MB
906
907 ti = dm_table_find_target(map, bvm->bi_sector);
b01cd5ac
MP
908 if (!dm_target_is_valid(ti))
909 goto out_table;
f6fccb12
MB
910
911 /*
912 * Find maximum amount of I/O that won't need splitting
913 */
914 max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
915 (sector_t) BIO_MAX_SECTORS);
916 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
917 if (max_size < 0)
918 max_size = 0;
919
920 /*
921 * merge_bvec_fn() returns number of bytes
922 * it can accept at this offset
923 * max is precomputed maximal io size
924 */
925 if (max_size && ti->type->merge)
926 max_size = ti->type->merge(ti, bvm, biovec, max_size);
8cbeb67a
MP
927 /*
928 * If the target doesn't support the merge method and some of the devices
929 * provided their merge_bvec method (we know this by looking at
930 * queue_max_hw_sectors), then we can't allow bios with multiple vector
931 * entries. So always set max_size to 0, and the code below allows
932 * just one page.
933 */
934 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
935
936 max_size = 0;
f6fccb12 937
b01cd5ac 938out_table:
5037108a
MP
939 dm_table_put(map);
940
941out:
f6fccb12
MB
942 /*
943 * Always allow an entire first page
944 */
945 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
946 max_size = biovec->bv_len;
947
f6fccb12
MB
948 return max_size;
949}
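
/*
 * dm_merge_bvec() is installed as this queue's merge_bvec_fn in
 * alloc_dev(), so the block layer consults it before adding each page
 * to a bio.  The effect is that dm_request() normally receives bios
 * that already fit inside a single target, keeping the split_bvec()
 * path above a rare slow case.
 */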
950
1da177e4
LT
951/*
952 * The request function that just remaps the bio built up by
953 * dm_merge_bvec.
954 */
165125e1 955static int dm_request(struct request_queue *q, struct bio *bio)
1da177e4 956{
12f03a49 957 int rw = bio_data_dir(bio);
1da177e4 958 struct mapped_device *md = q->queuedata;
c9959059 959 int cpu;
1da177e4 960
2ca3310e 961 down_read(&md->io_lock);
1da177e4 962
074a7aca
TH
963 cpu = part_stat_lock();
964 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
965 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
966 part_stat_unlock();
12f03a49 967
1da177e4 968 /*
1eb787ec
AK
969 * If we're suspended or the thread is processing barriers,
970 * we have to queue this io for later.
1da177e4 971 */
af7e466a
MP
972 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
973 unlikely(bio_barrier(bio))) {
2ca3310e 974 up_read(&md->io_lock);
1da177e4 975
54d9a1b4
AK
976 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
977 bio_rw(bio) == READA) {
978 bio_io_error(bio);
979 return 0;
980 }
1da177e4 981
92c63902 982 queue_io(md, bio);
1da177e4 983
92c63902 984 return 0;
1da177e4
LT
985 }
986
f0b9a450 987 __split_and_process_bio(md, bio);
2ca3310e 988 up_read(&md->io_lock);
f0b9a450 989 return 0;
1da177e4
LT
990}
991
165125e1 992static void dm_unplug_all(struct request_queue *q)
1da177e4
LT
993{
994 struct mapped_device *md = q->queuedata;
995 struct dm_table *map = dm_get_table(md);
996
997 if (map) {
998 dm_table_unplug_all(map);
999 dm_table_put(map);
1000 }
1001}
1002
1003static int dm_any_congested(void *congested_data, int bdi_bits)
1004{
8a57dfc6
CS
1005 int r = bdi_bits;
1006 struct mapped_device *md = congested_data;
1007 struct dm_table *map;
1da177e4 1008
1eb787ec 1009 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
8a57dfc6
CS
1010 map = dm_get_table(md);
1011 if (map) {
1012 r = dm_table_any_congested(map, bdi_bits);
1013 dm_table_put(map);
1014 }
1015 }
1016
1da177e4
LT
1017 return r;
1018}
1019
1020/*-----------------------------------------------------------------
1021 * An IDR is used to keep track of allocated minor numbers.
1022 *---------------------------------------------------------------*/
1da177e4
LT
1023static DEFINE_IDR(_minor_idr);
1024
2b06cfff 1025static void free_minor(int minor)
1da177e4 1026{
f32c10b0 1027 spin_lock(&_minor_lock);
1da177e4 1028 idr_remove(&_minor_idr, minor);
f32c10b0 1029 spin_unlock(&_minor_lock);
1da177e4
LT
1030}
1031
1032/*
1033 * See if the device with a specific minor # is free.
1034 */
cf13ab8e 1035static int specific_minor(int minor)
1da177e4
LT
1036{
1037 int r, m;
1038
1039 if (minor >= (1 << MINORBITS))
1040 return -EINVAL;
1041
62f75c2f
JM
1042 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1043 if (!r)
1044 return -ENOMEM;
1045
f32c10b0 1046 spin_lock(&_minor_lock);
1da177e4
LT
1047
1048 if (idr_find(&_minor_idr, minor)) {
1049 r = -EBUSY;
1050 goto out;
1051 }
1052
ba61fdd1 1053 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
62f75c2f 1054 if (r)
1da177e4 1055 goto out;
1da177e4
LT
1056
1057 if (m != minor) {
1058 idr_remove(&_minor_idr, m);
1059 r = -EBUSY;
1060 goto out;
1061 }
1062
1063out:
f32c10b0 1064 spin_unlock(&_minor_lock);
1da177e4
LT
1065 return r;
1066}
1067
cf13ab8e 1068static int next_free_minor(int *minor)
1da177e4 1069{
2b06cfff 1070 int r, m;
1da177e4 1071
1da177e4 1072 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
62f75c2f
JM
1073 if (!r)
1074 return -ENOMEM;
1075
f32c10b0 1076 spin_lock(&_minor_lock);
1da177e4 1077
ba61fdd1 1078 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
cf13ab8e 1079 if (r)
1da177e4 1080 goto out;
1da177e4
LT
1081
1082 if (m >= (1 << MINORBITS)) {
1083 idr_remove(&_minor_idr, m);
1084 r = -ENOSPC;
1085 goto out;
1086 }
1087
1088 *minor = m;
1089
1090out:
f32c10b0 1091 spin_unlock(&_minor_lock);
1da177e4
LT
1092 return r;
1093}
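
/*
 * Both minor allocators above follow the two-step IDR protocol:
 * idr_pre_get() preallocates memory outside the lock, then
 * idr_get_new()/idr_get_new_above() allocates the id under
 * _minor_lock, storing MINOR_ALLOCED as a placeholder until
 * alloc_dev() swaps in the real mapped_device via idr_replace().
 */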
1094
1095static struct block_device_operations dm_blk_dops;
1096
53d5914f
MP
1097static void dm_wq_work(struct work_struct *work);
1098
1da177e4
LT
1099/*
1100 * Allocate and initialise a blank device with a given minor.
1101 */
2b06cfff 1102static struct mapped_device *alloc_dev(int minor)
1da177e4
LT
1103{
1104 int r;
cf13ab8e 1105 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
ba61fdd1 1106 void *old_md;
1da177e4
LT
1107
1108 if (!md) {
1109 DMWARN("unable to allocate device, out of memory.");
1110 return NULL;
1111 }
1112
10da4f79 1113 if (!try_module_get(THIS_MODULE))
6ed7ade8 1114 goto bad_module_get;
10da4f79 1115
1da177e4 1116 /* get a minor number for the dev */
2b06cfff 1117 if (minor == DM_ANY_MINOR)
cf13ab8e 1118 r = next_free_minor(&minor);
2b06cfff 1119 else
cf13ab8e 1120 r = specific_minor(minor);
1da177e4 1121 if (r < 0)
6ed7ade8 1122 goto bad_minor;
1da177e4 1123
2ca3310e 1124 init_rwsem(&md->io_lock);
e61290a4 1125 mutex_init(&md->suspend_lock);
022c2611 1126 spin_lock_init(&md->deferred_lock);
1da177e4
LT
1127 rwlock_init(&md->map_lock);
1128 atomic_set(&md->holders, 1);
5c6bd75d 1129 atomic_set(&md->open_count, 0);
1da177e4 1130 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1131 atomic_set(&md->uevent_seq, 0);
1132 INIT_LIST_HEAD(&md->uevent_list);
1133 spin_lock_init(&md->uevent_lock);
1da177e4
LT
1134
1135 md->queue = blk_alloc_queue(GFP_KERNEL);
1136 if (!md->queue)
6ed7ade8 1137 goto bad_queue;
1da177e4
LT
1138
1139 md->queue->queuedata = md;
1140 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1141 md->queue->backing_dev_info.congested_data = md;
1142 blk_queue_make_request(md->queue, dm_request);
99360b4c 1143 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
daef265f 1144 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1da177e4 1145 md->queue->unplug_fn = dm_unplug_all;
f6fccb12 1146 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1da177e4 1147
93d2341c 1148 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
74859364 1149 if (!md->io_pool)
6ed7ade8 1150 goto bad_io_pool;
1da177e4 1151
93d2341c 1152 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
1da177e4 1153 if (!md->tio_pool)
6ed7ade8 1154 goto bad_tio_pool;
1da177e4 1155
bb799ca0 1156 md->bs = bioset_create(16, 0);
9faf400f
SB
1157 if (!md->bs)
1158 goto bad_no_bioset;
1159
1da177e4
LT
1160 md->disk = alloc_disk(1);
1161 if (!md->disk)
6ed7ade8 1162 goto bad_disk;
1da177e4 1163
f0b04115
JM
1164 atomic_set(&md->pending, 0);
1165 init_waitqueue_head(&md->wait);
53d5914f 1166 INIT_WORK(&md->work, dm_wq_work);
f0b04115
JM
1167 init_waitqueue_head(&md->eventq);
1168
1da177e4
LT
1169 md->disk->major = _major;
1170 md->disk->first_minor = minor;
1171 md->disk->fops = &dm_blk_dops;
1172 md->disk->queue = md->queue;
1173 md->disk->private_data = md;
1174 sprintf(md->disk->disk_name, "dm-%d", minor);
1175 add_disk(md->disk);
7e51f257 1176 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1177
304f3f6a
MB
1178 md->wq = create_singlethread_workqueue("kdmflush");
1179 if (!md->wq)
1180 goto bad_thread;
1181
32a926da
MP
1182 md->bdev = bdget_disk(md->disk, 0);
1183 if (!md->bdev)
1184 goto bad_bdev;
1185
ba61fdd1 1186 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1187 spin_lock(&_minor_lock);
ba61fdd1 1188 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1189 spin_unlock(&_minor_lock);
ba61fdd1
JM
1190
1191 BUG_ON(old_md != MINOR_ALLOCED);
1192
1da177e4
LT
1193 return md;
1194
32a926da
MP
1195bad_bdev:
1196 destroy_workqueue(md->wq);
304f3f6a
MB
1197bad_thread:
1198 put_disk(md->disk);
6ed7ade8 1199bad_disk:
9faf400f 1200 bioset_free(md->bs);
6ed7ade8 1201bad_no_bioset:
1da177e4 1202 mempool_destroy(md->tio_pool);
6ed7ade8 1203bad_tio_pool:
1da177e4 1204 mempool_destroy(md->io_pool);
6ed7ade8 1205bad_io_pool:
1312f40e 1206 blk_cleanup_queue(md->queue);
6ed7ade8 1207bad_queue:
1da177e4 1208 free_minor(minor);
6ed7ade8 1209bad_minor:
10da4f79 1210 module_put(THIS_MODULE);
6ed7ade8 1211bad_module_get:
1da177e4
LT
1212 kfree(md);
1213 return NULL;
1214}
1215
ae9da83f
JN
1216static void unlock_fs(struct mapped_device *md);
1217
1da177e4
LT
1218static void free_dev(struct mapped_device *md)
1219{
f331c029 1220 int minor = MINOR(disk_devt(md->disk));
63d94e48 1221
32a926da
MP
1222 unlock_fs(md);
1223 bdput(md->bdev);
304f3f6a 1224 destroy_workqueue(md->wq);
1da177e4
LT
1225 mempool_destroy(md->tio_pool);
1226 mempool_destroy(md->io_pool);
9faf400f 1227 bioset_free(md->bs);
9c47008d 1228 blk_integrity_unregister(md->disk);
1da177e4 1229 del_gendisk(md->disk);
63d94e48 1230 free_minor(minor);
fba9f90e
JM
1231
1232 spin_lock(&_minor_lock);
1233 md->disk->private_data = NULL;
1234 spin_unlock(&_minor_lock);
1235
1da177e4 1236 put_disk(md->disk);
1312f40e 1237 blk_cleanup_queue(md->queue);
10da4f79 1238 module_put(THIS_MODULE);
1da177e4
LT
1239 kfree(md);
1240}
1241
1242/*
1243 * Bind a table to the device.
1244 */
1245static void event_callback(void *context)
1246{
7a8c3d3b
MA
1247 unsigned long flags;
1248 LIST_HEAD(uevents);
1da177e4
LT
1249 struct mapped_device *md = (struct mapped_device *) context;
1250
7a8c3d3b
MA
1251 spin_lock_irqsave(&md->uevent_lock, flags);
1252 list_splice_init(&md->uevent_list, &uevents);
1253 spin_unlock_irqrestore(&md->uevent_lock, flags);
1254
ed9e1982 1255 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 1256
1da177e4
LT
1257 atomic_inc(&md->event_nr);
1258 wake_up(&md->eventq);
1259}
1260
4e90188b 1261static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 1262{
4e90188b 1263 set_capacity(md->disk, size);
1da177e4 1264
db8fef4f
MP
1265 mutex_lock(&md->bdev->bd_inode->i_mutex);
1266 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1267 mutex_unlock(&md->bdev->bd_inode->i_mutex);
1da177e4
LT
1268}
1269
1270static int __bind(struct mapped_device *md, struct dm_table *t)
1271{
165125e1 1272 struct request_queue *q = md->queue;
1da177e4
LT
1273 sector_t size;
1274
1275 size = dm_table_get_size(t);
3ac51e74
DW
1276
1277 /*
1278 * Wipe any geometry if the size of the table changed.
1279 */
1280 if (size != get_capacity(md->disk))
1281 memset(&md->geometry, 0, sizeof(md->geometry));
1282
32a926da 1283 __set_size(md, size);
d5816876
MP
1284
1285 if (!size) {
1286 dm_table_destroy(t);
1da177e4 1287 return 0;
d5816876 1288 }
1da177e4 1289
2ca3310e
AK
1290 dm_table_event_callback(t, event_callback, md);
1291
1da177e4
LT
1292 write_lock(&md->map_lock);
1293 md->map = t;
2ca3310e 1294 dm_table_set_restrictions(t, q);
1da177e4
LT
1295 write_unlock(&md->map_lock);
1296
1da177e4
LT
1297 return 0;
1298}
1299
1300static void __unbind(struct mapped_device *md)
1301{
1302 struct dm_table *map = md->map;
1303
1304 if (!map)
1305 return;
1306
1307 dm_table_event_callback(map, NULL, NULL);
1308 write_lock(&md->map_lock);
1309 md->map = NULL;
1310 write_unlock(&md->map_lock);
d5816876 1311 dm_table_destroy(map);
1da177e4
LT
1312}
1313
1314/*
1315 * Constructor for a new device.
1316 */
2b06cfff 1317int dm_create(int minor, struct mapped_device **result)
1da177e4
LT
1318{
1319 struct mapped_device *md;
1320
2b06cfff 1321 md = alloc_dev(minor);
1da177e4
LT
1322 if (!md)
1323 return -ENXIO;
1324
784aae73
MB
1325 dm_sysfs_init(md);
1326
1da177e4
LT
1327 *result = md;
1328 return 0;
1329}
1330
637842cf 1331static struct mapped_device *dm_find_md(dev_t dev)
1da177e4
LT
1332{
1333 struct mapped_device *md;
1da177e4
LT
1334 unsigned minor = MINOR(dev);
1335
1336 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1337 return NULL;
1338
f32c10b0 1339 spin_lock(&_minor_lock);
1da177e4
LT
1340
1341 md = idr_find(&_minor_idr, minor);
fba9f90e 1342 if (md && (md == MINOR_ALLOCED ||
f331c029 1343 (MINOR(disk_devt(dm_disk(md))) != minor) ||
17b2f66f 1344 test_bit(DMF_FREEING, &md->flags))) {
637842cf 1345 md = NULL;
fba9f90e
JM
1346 goto out;
1347 }
1da177e4 1348
fba9f90e 1349out:
f32c10b0 1350 spin_unlock(&_minor_lock);
1da177e4 1351
637842cf
DT
1352 return md;
1353}
1354
d229a958
DT
1355struct mapped_device *dm_get_md(dev_t dev)
1356{
1357 struct mapped_device *md = dm_find_md(dev);
1358
1359 if (md)
1360 dm_get(md);
1361
1362 return md;
1363}
1364
9ade92a9 1365void *dm_get_mdptr(struct mapped_device *md)
637842cf 1366{
9ade92a9 1367 return md->interface_ptr;
1da177e4
LT
1368}
1369
1370void dm_set_mdptr(struct mapped_device *md, void *ptr)
1371{
1372 md->interface_ptr = ptr;
1373}
1374
1375void dm_get(struct mapped_device *md)
1376{
1377 atomic_inc(&md->holders);
1378}
1379
72d94861
AK
1380const char *dm_device_name(struct mapped_device *md)
1381{
1382 return md->name;
1383}
1384EXPORT_SYMBOL_GPL(dm_device_name);
1385
1da177e4
LT
1386void dm_put(struct mapped_device *md)
1387{
1134e5ae 1388 struct dm_table *map;
1da177e4 1389
fba9f90e
JM
1390 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1391
f32c10b0 1392 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1134e5ae 1393 map = dm_get_table(md);
f331c029
TH
1394 idr_replace(&_minor_idr, MINOR_ALLOCED,
1395 MINOR(disk_devt(dm_disk(md))));
fba9f90e 1396 set_bit(DMF_FREEING, &md->flags);
f32c10b0 1397 spin_unlock(&_minor_lock);
cf222b37 1398 if (!dm_suspended(md)) {
1da177e4
LT
1399 dm_table_presuspend_targets(map);
1400 dm_table_postsuspend_targets(map);
1401 }
784aae73 1402 dm_sysfs_exit(md);
1134e5ae 1403 dm_table_put(map);
a1b51e98 1404 __unbind(md);
1da177e4
LT
1405 free_dev(md);
1406 }
1da177e4 1407}
79eb885c 1408EXPORT_SYMBOL_GPL(dm_put);
1da177e4 1409
401600df 1410static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
46125c1c
MB
1411{
1412 int r = 0;
b44ebeb0
MP
1413 DECLARE_WAITQUEUE(wait, current);
1414
1415 dm_unplug_all(md->queue);
1416
1417 add_wait_queue(&md->wait, &wait);
46125c1c
MB
1418
1419 while (1) {
401600df 1420 set_current_state(interruptible);
46125c1c
MB
1421
1422 smp_mb();
1423 if (!atomic_read(&md->pending))
1424 break;
1425
401600df
MP
1426 if (interruptible == TASK_INTERRUPTIBLE &&
1427 signal_pending(current)) {
46125c1c
MB
1428 r = -EINTR;
1429 break;
1430 }
1431
1432 io_schedule();
1433 }
1434 set_current_state(TASK_RUNNING);
1435
b44ebeb0
MP
1436 remove_wait_queue(&md->wait, &wait);
1437
46125c1c
MB
1438 return r;
1439}
1440
531fe963 1441static void dm_flush(struct mapped_device *md)
af7e466a
MP
1442{
1443 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
af7e466a
MP
1444}
1445
1446static void process_barrier(struct mapped_device *md, struct bio *bio)
1447{
5aa2781d
MP
1448 md->barrier_error = 0;
1449
531fe963 1450 dm_flush(md);
af7e466a 1451
5aa2781d
MP
1452 if (!bio_empty_barrier(bio)) {
1453 __split_and_process_bio(md, bio);
1454 dm_flush(md);
af7e466a
MP
1455 }
1456
af7e466a 1457 if (md->barrier_error != DM_ENDIO_REQUEUE)
531fe963 1458 bio_endio(bio, md->barrier_error);
2761e95f
MP
1459 else {
1460 spin_lock_irq(&md->deferred_lock);
1461 bio_list_add_head(&md->deferred, bio);
1462 spin_unlock_irq(&md->deferred_lock);
1463 }
af7e466a
MP
1464}
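
/*
 * So a barrier travels through three steps: dm_flush() waits for all
 * previously issued I/O to complete, the barrier's own payload (if it
 * is not an empty barrier) is mapped and flushed, and only then is the
 * original bio completed with the per-device barrier_error, or put
 * back on the deferred list when a noflush suspend requested a requeue
 * (DM_ENDIO_REQUEUE).
 */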
1465
1da177e4
LT
1466/*
1467 * Process the deferred bios
1468 */
ef208587 1469static void dm_wq_work(struct work_struct *work)
1da177e4 1470{
ef208587
MP
1471 struct mapped_device *md = container_of(work, struct mapped_device,
1472 work);
6d6f10df 1473 struct bio *c;
1da177e4 1474
ef208587
MP
1475 down_write(&md->io_lock);
1476
3b00b203 1477 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
1478 spin_lock_irq(&md->deferred_lock);
1479 c = bio_list_pop(&md->deferred);
1480 spin_unlock_irq(&md->deferred_lock);
1481
1482 if (!c) {
1eb787ec 1483 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
df12ee99
AK
1484 break;
1485 }
022c2611 1486
3b00b203
MP
1487 up_write(&md->io_lock);
1488
af7e466a
MP
1489 if (bio_barrier(c))
1490 process_barrier(md, c);
1491 else
1492 __split_and_process_bio(md, c);
3b00b203
MP
1493
1494 down_write(&md->io_lock);
022c2611 1495 }
73d410c0 1496
ef208587 1497 up_write(&md->io_lock);
1da177e4
LT
1498}
1499
9a1fb464 1500static void dm_queue_flush(struct mapped_device *md)
304f3f6a 1501{
3b00b203
MP
1502 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1503 smp_mb__after_clear_bit();
53d5914f 1504 queue_work(md->wq, &md->work);
304f3f6a
MB
1505}
1506
1da177e4
LT
1507/*
1508 * Swap in a new table (destroying old one).
1509 */
1510int dm_swap_table(struct mapped_device *md, struct dm_table *table)
1511{
93c534ae 1512 int r = -EINVAL;
1da177e4 1513
e61290a4 1514 mutex_lock(&md->suspend_lock);
1da177e4
LT
1515
1516 /* device must be suspended */
cf222b37 1517 if (!dm_suspended(md))
93c534ae 1518 goto out;
1da177e4
LT
1519
1520 __unbind(md);
1521 r = __bind(md, table);
1da177e4 1522
93c534ae 1523out:
e61290a4 1524 mutex_unlock(&md->suspend_lock);
93c534ae 1525 return r;
1da177e4
LT
1526}
1527
1528/*
1529 * Functions to lock and unlock any filesystem running on the
1530 * device.
1531 */
2ca3310e 1532static int lock_fs(struct mapped_device *md)
1da177e4 1533{
e39e2e95 1534 int r;
1da177e4
LT
1535
1536 WARN_ON(md->frozen_sb);
dfbe03f6 1537
db8fef4f 1538 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 1539 if (IS_ERR(md->frozen_sb)) {
cf222b37 1540 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
1541 md->frozen_sb = NULL;
1542 return r;
dfbe03f6
AK
1543 }
1544
aa8d7c2f
AK
1545 set_bit(DMF_FROZEN, &md->flags);
1546
1da177e4
LT
1547 return 0;
1548}
1549
2ca3310e 1550static void unlock_fs(struct mapped_device *md)
1da177e4 1551{
aa8d7c2f
AK
1552 if (!test_bit(DMF_FROZEN, &md->flags))
1553 return;
1554
db8fef4f 1555 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 1556 md->frozen_sb = NULL;
aa8d7c2f 1557 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
1558}
1559
1560/*
1561 * We need to be able to change a mapping table under a mounted
1562 * filesystem. For example we might want to move some data in
1563 * the background. Before the table can be swapped with
1564 * dm_bind_table, dm_suspend must be called to flush any in
1565 * flight bios and ensure that any further io gets deferred.
1566 */
a3d77d35 1567int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1da177e4 1568{
2ca3310e 1569 struct dm_table *map = NULL;
46125c1c 1570 int r = 0;
a3d77d35 1571 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2e93ccc1 1572 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1da177e4 1573
e61290a4 1574 mutex_lock(&md->suspend_lock);
2ca3310e 1575
73d410c0
MB
1576 if (dm_suspended(md)) {
1577 r = -EINVAL;
d287483d 1578 goto out_unlock;
73d410c0 1579 }
1da177e4
LT
1580
1581 map = dm_get_table(md);
1da177e4 1582
2e93ccc1
KU
1583 /*
1584 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
1585 * This flag is cleared before dm_suspend returns.
1586 */
1587 if (noflush)
1588 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1589
cf222b37
AK
1590 /* This does not get reverted if there's an error later. */
1591 dm_table_presuspend_targets(map);
1592
32a926da
MP
1593 /*
1594 * Flush I/O to the device. noflush supersedes do_lockfs,
1595 * because lock_fs() needs to flush I/Os.
1596 */
1597 if (!noflush && do_lockfs) {
1598 r = lock_fs(md);
1599 if (r)
f431d966 1600 goto out;
aa8d7c2f 1601 }
1da177e4
LT
1602
1603 /*
3b00b203
MP
1604 * Here we must make sure that no processes are submitting requests
1605 * to target drivers i.e. no one may be executing
1606 * __split_and_process_bio. This is called from dm_request and
1607 * dm_wq_work.
1608 *
1609 * To get all processes out of __split_and_process_bio in dm_request,
1610 * we take the write lock. To prevent any process from reentering
1611 * __split_and_process_bio from dm_request, we set
1612 * DMF_QUEUE_IO_TO_THREAD.
1613 *
1614 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
1615 * and call flush_workqueue(md->wq). flush_workqueue will wait until
1616 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
1617 * further calls to __split_and_process_bio from dm_wq_work.
1da177e4 1618 */
2ca3310e 1619 down_write(&md->io_lock);
1eb787ec
AK
1620 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1621 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2ca3310e 1622 up_write(&md->io_lock);
1da177e4 1623
3b00b203
MP
1624 flush_workqueue(md->wq);
1625
1da177e4 1626 /*
3b00b203
MP
1627 * At this point no more requests are entering target request routines.
1628 * We call dm_wait_for_completion to wait for all existing requests
1629 * to finish.
1da177e4 1630 */
401600df 1631 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
1da177e4 1632
2ca3310e 1633 down_write(&md->io_lock);
6d6f10df 1634 if (noflush)
022c2611 1635 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
94d6351e 1636 up_write(&md->io_lock);
2e93ccc1 1637
1da177e4 1638 /* were we interrupted ? */
46125c1c 1639 if (r < 0) {
9a1fb464 1640 dm_queue_flush(md);
73d410c0 1641
2ca3310e 1642 unlock_fs(md);
2e93ccc1 1643 goto out; /* pushback list is already flushed, so skip flush */
2ca3310e 1644 }
1da177e4 1645
3b00b203
MP
1646 /*
1647 * If dm_wait_for_completion returned 0, the device is completely
1648 * quiescent now. There is no request-processing activity. All new
1649 * requests are being added to md->deferred list.
1650 */
1651
cf222b37 1652 dm_table_postsuspend_targets(map);
1da177e4 1653
2ca3310e 1654 set_bit(DMF_SUSPENDED, &md->flags);
b84b0287 1655
2ca3310e
AK
1656out:
1657 dm_table_put(map);
d287483d
AK
1658
1659out_unlock:
e61290a4 1660 mutex_unlock(&md->suspend_lock);
cf222b37 1661 return r;
1da177e4
LT
1662}
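
/*
 * Callers (dm-ioctl, for instance) pass DM_SUSPEND_LOCKFS_FLAG when
 * any filesystem mounted on the device should be frozen with lock_fs()
 * first, and DM_SUSPEND_NOFLUSH_FLAG when in-flight I/O should be
 * pushed back to the deferred list rather than flushed to the targets.
 */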
1663
1664int dm_resume(struct mapped_device *md)
1665{
cf222b37 1666 int r = -EINVAL;
cf222b37 1667 struct dm_table *map = NULL;
1da177e4 1668
e61290a4 1669 mutex_lock(&md->suspend_lock);
2ca3310e 1670 if (!dm_suspended(md))
cf222b37 1671 goto out;
cf222b37
AK
1672
1673 map = dm_get_table(md);
2ca3310e 1674 if (!map || !dm_table_get_size(map))
cf222b37 1675 goto out;
1da177e4 1676
8757b776
MB
1677 r = dm_table_resume_targets(map);
1678 if (r)
1679 goto out;
2ca3310e 1680
9a1fb464 1681 dm_queue_flush(md);
2ca3310e
AK
1682
1683 unlock_fs(md);
1684
1685 clear_bit(DMF_SUSPENDED, &md->flags);
1686
1da177e4 1687 dm_table_unplug_all(map);
1da177e4 1688
69267a30 1689 dm_kobject_uevent(md);
8560ed6f 1690
cf222b37 1691 r = 0;
2ca3310e 1692
cf222b37
AK
1693out:
1694 dm_table_put(map);
e61290a4 1695 mutex_unlock(&md->suspend_lock);
2ca3310e 1696
cf222b37 1697 return r;
1da177e4
LT
1698}
1699
1700/*-----------------------------------------------------------------
1701 * Event notification.
1702 *---------------------------------------------------------------*/
69267a30
AK
1703void dm_kobject_uevent(struct mapped_device *md)
1704{
ed9e1982 1705 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
69267a30
AK
1706}
1707
7a8c3d3b
MA
1708uint32_t dm_next_uevent_seq(struct mapped_device *md)
1709{
1710 return atomic_add_return(1, &md->uevent_seq);
1711}
1712
1da177e4
LT
1713uint32_t dm_get_event_nr(struct mapped_device *md)
1714{
1715 return atomic_read(&md->event_nr);
1716}
1717
1718int dm_wait_event(struct mapped_device *md, int event_nr)
1719{
1720 return wait_event_interruptible(md->eventq,
1721 (event_nr != atomic_read(&md->event_nr)));
1722}
1723
7a8c3d3b
MA
1724void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
1725{
1726 unsigned long flags;
1727
1728 spin_lock_irqsave(&md->uevent_lock, flags);
1729 list_add(elist, &md->uevent_list);
1730 spin_unlock_irqrestore(&md->uevent_lock, flags);
1731}
1732
1da177e4
LT
1733/*
1734 * The gendisk is only valid as long as you have a reference
1735 * count on 'md'.
1736 */
1737struct gendisk *dm_disk(struct mapped_device *md)
1738{
1739 return md->disk;
1740}
1741
784aae73
MB
1742struct kobject *dm_kobject(struct mapped_device *md)
1743{
1744 return &md->kobj;
1745}
1746
1747/*
1748 * struct mapped_device should not be exported outside of dm.c
1749 * so use this check to verify that kobj is part of md structure
1750 */
1751struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1752{
1753 struct mapped_device *md;
1754
1755 md = container_of(kobj, struct mapped_device, kobj);
1756 if (&md->kobj != kobj)
1757 return NULL;
1758
4d89b7b4
MB
1759 if (test_bit(DMF_FREEING, &md->flags) ||
1760 test_bit(DMF_DELETING, &md->flags))
1761 return NULL;
1762
784aae73
MB
1763 dm_get(md);
1764 return md;
1765}
1766
1da177e4
LT
1767int dm_suspended(struct mapped_device *md)
1768{
1769 return test_bit(DMF_SUSPENDED, &md->flags);
1770}
1771
2e93ccc1
KU
1772int dm_noflush_suspending(struct dm_target *ti)
1773{
1774 struct mapped_device *md = dm_table_get_md(ti->table);
1775 int r = __noflush_suspending(md);
1776
1777 dm_put(md);
1778
1779 return r;
1780}
1781EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1782
1da177e4
LT
1783static struct block_device_operations dm_blk_dops = {
1784 .open = dm_blk_open,
1785 .release = dm_blk_close,
aa129a22 1786 .ioctl = dm_blk_ioctl,
3ac51e74 1787 .getgeo = dm_blk_getgeo,
1da177e4
LT
1788 .owner = THIS_MODULE
1789};
1790
1791EXPORT_SYMBOL(dm_get_mapinfo);
1792
1793/*
1794 * module hooks
1795 */
1796module_init(dm_init);
1797module_exit(dm_exit);
1798
1799module_param(major, uint, 0);
1800MODULE_PARM_DESC(major, "The major number of the device mapper");
1801MODULE_DESCRIPTION(DM_NAME " driver");
1802MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1803MODULE_LICENSE("GPL");