/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct target_io *)bio->bi_private)->info;

	return NULL;
}
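
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a target's map function can stash per-io context in the map_info
 * that dm_get_mapinfo() later recovers from bio->bi_private.  The
 * field and state names here are hypothetical:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *info)
 *	{
 *		info->ptr = my_per_io_state;	// hypothetical state
 *		...
 *		return 1;			// remapped, dispatch it
 *	}
 */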
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;
static int __init local_init(void)
{
	int r;

	/* allocate a bio set */
	dm_set = bioset_create(16, 16, 4);
	if (!dm_set)
		return -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	bioset_free(dm_set);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}
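
/*
 * Note (editor's addition): the return value encodes a small protocol
 * with dm_request() below.  0 means the bio is now on md->deferred and
 * will be resubmitted at resume time; 1 means DMF_BLOCK_IO was already
 * clear again, so the caller must retake the read lock and map the bio
 * itself:
 *
 *	r = queue_io(md, bio);
 *	if (r == 0)
 *		return 0;	// deferred, nothing more to do
 *	// r == 1: raced with resume, loop and remap under io_lock
 */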
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
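
/*
 * Usage sketch (editor's illustration): every reader takes a counted
 * reference on the live table and must drop it when done:
 *
 *	struct dm_table *t = dm_get_table(md);
 *	if (t) {
 *		sector_t size = dm_table_get_size(t);
 *		... use the table ...
 *		dm_table_put(t);	// drop the reference
 *	}
 */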
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
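
/*
 * Worked example (editor's addition): with heads = 255, sectors = 63
 * and cylinders = 1024,
 *
 *	sz = 1024 * 255 * 63 = 16450560 sectors
 *
 * (about 7.8 GiB at 512 bytes per sector), so any geo->start beyond
 * 16450560 is rejected as outside the addressable geometry.
 */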
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge function; unfortunately there are a couple of changes to
 * the block layer that I want to make for this first.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;
		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
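
/*
 * Worked example (editor's addition): with ti->split_io = 8 (a power
 * of two) and offset = 13,
 *
 *	boundary = ((13 + 8) & ~7) - 13 = 16 - 13 = 3
 *
 * so io starting 13 sectors into the target may cover at most 3 more
 * sectors before it would cross the next split_io boundary.
 */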
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	} else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, r);
		bio_put(clone);
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, dm_set);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
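
/*
 * Note (editor's addition): __clone_and_map() below chooses between
 * these two helpers.  For example, a bio carrying three 4 KiB bvecs
 * (24 sectors) that hits a target with only 20 sectors left would
 * clone the first two bvecs whole with clone_bio() (16 sectors) and
 * then carve 4 sectors out of the third bvec with split_bvec(),
 * leaving the remainder for the next target.
 */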
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
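
/*
 * Reference-count walkthrough (editor's addition): io_count starts at
 * 1 here, each __map_bio() takes one more reference per clone, and
 * each clone_endio() drops one via dec_pending().  The final
 * dec_pending(ci.io, 0) above drops the initial reference, so the
 * original bio completes only after every clone has finished:
 *
 *	io_count: 1 --(map N clones)--> 1+N --(N endios)--> 1 --> 0
 */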
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/
/*
 * The request function: splits the incoming bio into clones and
 * remaps them to the targets of the current table.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
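
/*
 * Pattern note (editor's addition): both helpers use the two-phase IDR
 * allocation of this kernel era; the preallocation may sleep, so it
 * happens outside the spinlock, and the id is taken under it:
 *
 *	if (!idr_pre_get(&_minor_idr, GFP_KERNEL))	// may sleep
 *		return -ENOMEM;
 *	spin_lock(&_minor_lock);
 *	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);	// atomic
 *	spin_unlock(&_minor_lock);
 *
 * MINOR_ALLOCED is a placeholder; alloc_dev() swaps in the real md
 * with idr_replace() once the device is fully constructed.
 */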
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	unsigned int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		thaw_bdev(md->suspended_bdev, NULL);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);

	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags)))
		md = NULL;

	spin_unlock(&_minor_lock);

	return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
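
/*
 * Lifetime sketch (editor's illustration): callers pair dm_get() or
 * dm_get_md() with dm_put(); the final put tears the device down:
 *
 *	struct mapped_device *md = dm_get_md(dev);	// takes a ref
 *	if (md) {
 *		... use md ...
 *		dm_put(md);		// last ref frees the device
 *	}
 */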
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}
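
/*
 * Typical sequence (editor's illustration; in-tree the ioctl interface
 * is the real caller):
 *
 *	dm_suspend(md, 1);		// quiesce io, optionally lock fs
 *	dm_swap_table(md, new_table);	// only legal while suspended
 *	dm_resume(md);			// replay the deferred bios
 */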
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}
static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	md->suspended_bdev = bdget_disk(md->disk, 0);
	if (!md->suspended_bdev) {
		DMWARN("bdget failed in dm_suspend");
		r = -ENOMEM;
		goto out;
	}

	/* Flush I/O to the device. */
	if (do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&md->pending) || signal_pending(current))
			break;
		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	dm_table_resume_targets(map);

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	bdput(md->suspended_bdev);
	md->suspended_bdev = NULL;

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
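
/*
 * Usage sketch (editor's illustration): a caller snapshots the event
 * counter, inspects state, then sleeps until the table raises a new
 * event:
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	... examine status ...
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;	// interrupted by a signal
 */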
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");