/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>

#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};
/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
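/*
 * Note (added for clarity, not in the original): request-based dm stores its
 * per-request dm_rq_target_io in rq->end_io_data (see clone_rq()/setup_clone()
 * below), which is what makes the cast above safe for requests owned by dm.
 */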
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7
#define DMF_SUSPENDED_INTERNALLY 8
/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;
};
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};
#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
				      unsigned def, unsigned max)
{
	unsigned ios = ACCESS_ONCE(*reserved_ios);
	unsigned modified_ios = 0;

	if (!ios)
		modified_ios = def;
	else if (ios > max)
		modified_ios = max;

	if (modified_ios) {
		(void)cmpxchg(reserved_ios, ios, modified_ios);
		ios = modified_ios;
	}

	return ios;
}
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
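/*
 * Illustrative note (not from the original source): these reserved I/O counts
 * are what the mempool-sizing code would consume, e.g. something along the
 * lines of
 *
 *	pool_size = dm_get_reserved_bio_based_ios();
 *	pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
 *
 * __dm_get_reserved_ios() clamps a user-supplied value lock-free: it reads the
 * current setting once and uses cmpxchg() to write back the clamped value, so
 * a concurrent update simply wins or loses the race without corrupting state.
 */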
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_tio_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}
static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {

static void (*_exits[])(void) = {

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int i, r;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);

	spin_unlock(&_minor_lock);
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
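/*
 * Illustrative sketch (not part of the original file): the canonical pattern
 * for any code path that needs to look at the live table is
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... use map ...
 *	dm_put_live_table(md, srcu_idx);
 *
 * Table swaps (__bind/__unbind) publish the new pointer with
 * rcu_assign_pointer() and then call dm_sync_table(), so readers in both the
 * SRCU and the plain-RCU (_fast) sections have drained before the old table
 * is destroyed.
 */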
/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
682 * Open a table device so we can use it as a map destination.
684 static int open_table_device(struct table_device *td, dev_t dev,
685 struct mapped_device *md)
687 static char *_claim_ptr = "I belong to device-mapper";
688 struct block_device *bdev;
692 BUG_ON(td->dm_dev.bdev);
694 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
696 return PTR_ERR(bdev);
698 r = bd_link_disk_holder(bdev, dm_disk(md));
700 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
704 td->dm_dev.bdev = bdev;
709 * Close a table device that we've been using.
711 static void close_table_device(struct table_device *td, struct mapped_device *md)
713 if (!td->dm_dev.bdev)
716 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
717 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
718 td->dm_dev.bdev = NULL;
721 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
723 struct table_device *td;
725 list_for_each_entry(td, l, list)
726 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
732 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
733 struct dm_dev **result) {
735 struct table_device *td;
737 mutex_lock(&md->table_devices_lock);
738 td = find_table_device(&md->table_devices, dev, mode);
740 td = kmalloc(sizeof(*td), GFP_KERNEL);
742 mutex_unlock(&md->table_devices_lock);
746 td->dm_dev.mode = mode;
747 td->dm_dev.bdev = NULL;
749 if ((r = open_table_device(td, dev, md))) {
750 mutex_unlock(&md->table_devices_lock);
755 format_dev_t(td->dm_dev.name, dev);
757 atomic_set(&td->count, 0);
758 list_add(&td->list, &md->table_devices);
760 atomic_inc(&td->count);
761 mutex_unlock(&md->table_devices_lock);
763 *result = &td->dm_dev;
766 EXPORT_SYMBOL_GPL(dm_get_table_device);
768 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
770 struct table_device *td = container_of(d, struct table_device, dm_dev);
772 mutex_lock(&md->table_devices_lock);
773 if (atomic_dec_and_test(&td->count)) {
774 close_table_device(td, md);
778 mutex_unlock(&md->table_devices_lock);
780 EXPORT_SYMBOL(dm_put_table_device);
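/*
 * Note (added for clarity, not in the original): dm_get_table_device() and
 * dm_put_table_device() reference-count the underlying block device per
 * (dev_t, mode) pair under md->table_devices_lock, so repeated opens of the
 * same device by a table reuse one struct table_device instead of calling
 * blkdev_get_by_dev() again.
 */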
782 static void free_table_devices(struct list_head *devices)
784 struct list_head *tmp, *next;
786 list_for_each_safe(tmp, next, devices) {
787 struct table_device *td = list_entry(tmp, struct table_device, list);
789 DMWARN("dm_destroy: %s still exists with %d references",
790 td->dm_dev.name, atomic_read(&td->count));
796 * Get the geometry associated with a dm device
798 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
806 * Set the geometry of a device.
808 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
810 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
812 if (geo->start > sz) {
813 DMWARN("Start sector is beyond the geometry limits.");
822 /*-----------------------------------------------------------------
824 * A more elegant soln is in the works that uses the queue
825 * merge fn, unfortunately there are a couple of changes to
826 * the block layer that I want to make for this. So in the
827 * interests of getting something for people to use I give
828 * you this clearly demarcated crap.
829 *---------------------------------------------------------------*/
831 static int __noflush_suspending(struct mapped_device *md)
833 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
840 static void dec_pending(struct dm_io *io, int error)
845 struct mapped_device *md = io->md;
847 /* Push-back supersedes any I/O errors */
848 if (unlikely(error)) {
849 spin_lock_irqsave(&io->endio_lock, flags);
850 if (!(io->error > 0 && __noflush_suspending(md)))
852 spin_unlock_irqrestore(&io->endio_lock, flags);
855 if (atomic_dec_and_test(&io->io_count)) {
856 if (io->error == DM_ENDIO_REQUEUE) {
858 * Target requested pushing back the I/O.
860 spin_lock_irqsave(&md->deferred_lock, flags);
861 if (__noflush_suspending(md))
862 bio_list_add_head(&md->deferred, io->bio);
864 /* noflush suspend was interrupted. */
866 spin_unlock_irqrestore(&md->deferred_lock, flags);
869 io_error = io->error;
874 if (io_error == DM_ENDIO_REQUEUE)
877 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
879 * Preflush done for flush with data, reissue
882 bio->bi_rw &= ~REQ_FLUSH;
885 /* done with normal IO or empty flush */
886 trace_block_bio_complete(md->queue, bio, io_error);
887 bio_endio(bio, io_error);
892 static void disable_write_same(struct mapped_device *md)
894 struct queue_limits *limits = dm_get_queue_limits(md);
896 /* device doesn't really support WRITE SAME, disable it */
897 limits->max_write_same_sectors = 0;
900 static void clone_endio(struct bio *bio, int error)
903 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
904 struct dm_io *io = tio->io;
905 struct mapped_device *md = tio->io->md;
906 dm_endio_fn endio = tio->ti->type->end_io;
908 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
912 r = endio(tio->ti, bio, error);
913 if (r < 0 || r == DM_ENDIO_REQUEUE)
915 * error and requeue request are handled
919 else if (r == DM_ENDIO_INCOMPLETE)
920 /* The target will handle the io */
923 DMWARN("unimplemented target endio return value: %d", r);
928 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
929 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
930 disable_write_same(md);
933 dec_pending(io, error);
937 * Partial completion handling for request-based dm
939 static void end_clone_bio(struct bio *clone, int error)
941 struct dm_rq_clone_bio_info *info =
942 container_of(clone, struct dm_rq_clone_bio_info, clone);
943 struct dm_rq_target_io *tio = info->tio;
944 struct bio *bio = info->orig;
945 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
988 * Don't touch any member of the md after calling this function because
989 * the md may be freed in dm_put() at the end of this function.
990 * Or do dm_get() before calling this function and dm_put() later.
992 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
994 atomic_dec(&md->pending[rw]);
996 /* nudge anyone waiting on suspend queue */
997 if (!md_in_flight(md))
1001 * Run this off this callpath, as drivers could invoke end_io while
1002 * inside their request_fn (and holding the queue lock). Calling
1003 * back into ->request_fn() could deadlock attempting to grab the
1007 blk_run_queue_async(md->queue);
1010 * dm_put() must be at the end of this function. See the comment above
1015 static void free_rq_clone(struct request *clone)
1017 struct dm_rq_target_io *tio = clone->end_io_data;
1019 blk_rq_unprep_clone(clone);
1024 * Complete the clone and the original request.
1025 * Must be called without queue lock.
1027 static void dm_end_request(struct request *clone, int error)
1029 int rw = rq_data_dir(clone);
1030 struct dm_rq_target_io *tio = clone->end_io_data;
1031 struct mapped_device *md = tio->md;
1032 struct request *rq = tio->orig;
1034 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1035 rq->errors = clone->errors;
1036 rq->resid_len = clone->resid_len;
1040 * We are using the sense buffer of the original
1042 * So setting the length of the sense data is enough.
1044 rq->sense_len = clone->sense_len;
1047 free_rq_clone(clone);
1048 blk_end_request_all(rq, error);
1049 rq_completed(md, rw, true);
1052 static void dm_unprep_request(struct request *rq)
1054 struct request *clone = rq->special;
1057 rq->cmd_flags &= ~REQ_DONTPREP;
1059 free_rq_clone(clone);
1063 * Requeue the original request of a clone.
1065 void dm_requeue_unmapped_request(struct request *clone)
1067 int rw = rq_data_dir(clone);
1068 struct dm_rq_target_io *tio = clone->end_io_data;
1069 struct mapped_device *md = tio->md;
1070 struct request *rq = tio->orig;
1071 struct request_queue *q = rq->q;
1072 unsigned long flags;
1074 dm_unprep_request(rq);
1076 spin_lock_irqsave(q->queue_lock, flags);
1077 blk_requeue_request(q, rq);
1078 spin_unlock_irqrestore(q->queue_lock, flags);
1080 rq_completed(md, rw, 0);
1082 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
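/*
 * Note (added for clarity, not in the original): requeueing unwinds the prep
 * step (dm_unprep_request() frees the clone and clears REQ_DONTPREP) and puts
 * the original request back on its queue, so a later dm_request_fn() pass
 * will re-clone and re-map it.
 */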
1084 static void __stop_queue(struct request_queue *q)
1089 static void stop_queue(struct request_queue *q)
1091 unsigned long flags;
1093 spin_lock_irqsave(q->queue_lock, flags);
1095 spin_unlock_irqrestore(q->queue_lock, flags);
1098 static void __start_queue(struct request_queue *q)
1100 if (blk_queue_stopped(q))
1104 static void start_queue(struct request_queue *q)
1106 unsigned long flags;
1108 spin_lock_irqsave(q->queue_lock, flags);
1110 spin_unlock_irqrestore(q->queue_lock, flags);
1113 static void dm_done(struct request *clone, int error, bool mapped)
1116 struct dm_rq_target_io *tio = clone->end_io_data;
1117 dm_request_endio_fn rq_end_io = NULL;
1120 rq_end_io = tio->ti->type->rq_end_io;
1122 if (mapped && rq_end_io)
1123 r = rq_end_io(tio->ti, clone, error, &tio->info);
1126 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
1127 !clone->q->limits.max_write_same_sectors))
1128 disable_write_same(tio->md);
1131 /* The target wants to complete the I/O */
1132 dm_end_request(clone, r);
1133 else if (r == DM_ENDIO_INCOMPLETE)
1134 /* The target will handle the I/O */
1136 else if (r == DM_ENDIO_REQUEUE)
1137 /* The target wants to requeue the I/O */
1138 dm_requeue_unmapped_request(clone);
1140 DMWARN("unimplemented target endio return value: %d", r);
1146 * Request completion handler for request-based dm
1148 static void dm_softirq_done(struct request *rq)
1151 struct request *clone = rq->completion_data;
1152 struct dm_rq_target_io *tio = clone->end_io_data;
1154 if (rq->cmd_flags & REQ_FAILED)
1157 dm_done(clone, tio->error, mapped);
1161 * Complete the clone and the original request with the error status
1162 * through softirq context.
1164 static void dm_complete_request(struct request *clone, int error)
1166 struct dm_rq_target_io *tio = clone->end_io_data;
1167 struct request *rq = tio->orig;
1170 rq->completion_data = clone;
1171 blk_complete_request(rq);
1175 * Complete the not-mapped clone and the original request with the error status
1176 * through softirq context.
1177 * Target's rq_end_io() function isn't called.
1178 * This may be used when the target's map_rq() function fails.
1180 void dm_kill_unmapped_request(struct request *clone, int error)
1182 struct dm_rq_target_io *tio = clone->end_io_data;
1183 struct request *rq = tio->orig;
1185 rq->cmd_flags |= REQ_FAILED;
1186 dm_complete_request(clone, error);
1188 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
 * Called with the queue lock held.
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This is only for cleaning up the bookkeeping of the queue in which
	 * the clone was dispatched.
	 * The clone is *not* actually freed here, because it was allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}
1215 * Return maximum size of I/O possible at the supplied sector up to the current
1218 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1220 sector_t target_offset = dm_target_offset(ti, sector);
1222 return ti->len - target_offset;
1225 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1227 sector_t len = max_io_len_target_boundary(sector, ti);
1228 sector_t offset, max_len;
1231 * Does the target need to split even further?
1233 if (ti->max_io_len) {
1234 offset = dm_target_offset(ti, sector);
1235 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1236 max_len = sector_div(offset, ti->max_io_len);
1238 max_len = offset & (ti->max_io_len - 1);
1239 max_len = ti->max_io_len - max_len;
1248 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1250 if (len > UINT_MAX) {
1251 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1252 (unsigned long long)len, UINT_MAX);
1253 ti->error = "Maximum size of target IO is too large";
1257 ti->max_io_len = (uint32_t) len;
1261 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
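/*
 * Illustrative example (not from the original source): a target constructor
 * would typically cap its per-bio I/O size like this, returning the error
 * that dm_set_target_max_io_len() reports (it also sets ti->error):
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		int r = dm_set_target_max_io_len(ti, chunk_sectors);
 *		if (r)
 *			return r;
 *		...
 *	}
 *
 * example_ctr and chunk_sectors are hypothetical names.
 */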
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
1291 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1293 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1294 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1295 BUG_ON(bio->bi_rw & REQ_FLUSH);
1296 BUG_ON(bi_size > *tio->len_ptr);
1297 BUG_ON(n_sectors > bi_size);
1298 *tio->len_ptr -= bi_size - n_sectors;
1299 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1301 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
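/*
 * Illustrative example (not from the original source): a target's ->map hook
 * that can only handle I/O up to some internal boundary might use this as
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned remaining = sectors_until_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > remaining)
 *			dm_accept_partial_bio(bio, remaining);
 *		bio->bi_bdev = example_dev(ti)->bdev;
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * The core then re-submits the unprocessed tail (region 3 above) as a new bio.
 * example_map, sectors_until_boundary and example_dev are hypothetical names.
 */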
1303 static void __map_bio(struct dm_target_io *tio)
1307 struct mapped_device *md;
1308 struct bio *clone = &tio->clone;
1309 struct dm_target *ti = tio->ti;
1311 clone->bi_end_io = clone_endio;
1314 * Map the clone. If r == 0 we don't need to do
1315 * anything, the target has assumed ownership of
1318 atomic_inc(&tio->io->io_count);
1319 sector = clone->bi_iter.bi_sector;
1320 r = ti->type->map(ti, clone);
1321 if (r == DM_MAPIO_REMAPPED) {
1322 /* the bio has been remapped so dispatch it */
1324 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1325 tio->io->bio->bi_bdev->bd_dev, sector);
1327 generic_make_request(clone);
1328 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1329 /* error the io and bail out, or requeue it if needed */
1331 dec_pending(tio->io, r);
1334 DMWARN("unimplemented target map return value: %d", r);
1340 struct mapped_device *md;
1341 struct dm_table *map;
1345 unsigned sector_count;
1348 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1350 bio->bi_iter.bi_sector = sector;
1351 bio->bi_iter.bi_size = to_bytes(len);
1355 * Creates a bio that consists of range of complete bvecs.
1357 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1358 sector_t sector, unsigned len)
1360 struct bio *clone = &tio->clone;
1362 __bio_clone_fast(clone, bio);
1364 if (bio_integrity(bio))
1365 bio_integrity_clone(clone, bio, GFP_NOIO);
1367 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1368 clone->bi_iter.bi_size = to_bytes(len);
1370 if (bio_integrity(bio))
1371 bio_integrity_trim(clone, 0, len);
1374 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1375 struct dm_target *ti,
1376 unsigned target_bio_nr)
1378 struct dm_target_io *tio;
1381 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1382 tio = container_of(clone, struct dm_target_io, clone);
1386 tio->target_bio_nr = target_bio_nr;
1391 static void __clone_and_map_simple_bio(struct clone_info *ci,
1392 struct dm_target *ti,
1393 unsigned target_bio_nr, unsigned *len)
1395 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1396 struct bio *clone = &tio->clone;
1400 __bio_clone_fast(clone, ci->bio);
1402 bio_setup_sector(clone, ci->sector, *len);
1407 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1408 unsigned num_bios, unsigned *len)
1410 unsigned target_bio_nr;
1412 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1413 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1416 static int __send_empty_flush(struct clone_info *ci)
1418 unsigned target_nr = 0;
1419 struct dm_target *ti;
1421 BUG_ON(bio_has_data(ci->bio));
1422 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1423 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1428 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1429 sector_t sector, unsigned *len)
1431 struct bio *bio = ci->bio;
1432 struct dm_target_io *tio;
1433 unsigned target_bio_nr;
1434 unsigned num_target_bios = 1;
1437 * Does the target want to receive duplicate copies of the bio?
1439 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1440 num_target_bios = ti->num_write_bios(ti, bio);
1442 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1443 tio = alloc_tio(ci, ti, target_bio_nr);
1445 clone_bio(tio, bio, sector, *len);
1450 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1452 static unsigned get_num_discard_bios(struct dm_target *ti)
1454 return ti->num_discard_bios;
1457 static unsigned get_num_write_same_bios(struct dm_target *ti)
1459 return ti->num_write_same_bios;
1462 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1464 static bool is_split_required_for_discard(struct dm_target *ti)
1466 return ti->split_discard_bios;
1469 static int __send_changing_extent_only(struct clone_info *ci,
1470 get_num_bios_fn get_num_bios,
1471 is_split_required_fn is_split_required)
1473 struct dm_target *ti;
1478 ti = dm_table_find_target(ci->map, ci->sector);
1479 if (!dm_target_is_valid(ti))
1483 * Even though the device advertised support for this type of
1484 * request, that does not mean every target supports it, and
1485 * reconfiguration might also have changed that since the
1486 * check was performed.
1488 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1492 if (is_split_required && !is_split_required(ti))
1493 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1495 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1497 __send_duplicate_bios(ci, ti, num_bios, &len);
1500 } while (ci->sector_count -= len);
1505 static int __send_discard(struct clone_info *ci)
1507 return __send_changing_extent_only(ci, get_num_discard_bios,
1508 is_split_required_for_discard);
1511 static int __send_write_same(struct clone_info *ci)
1513 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1517 * Select the correct strategy for processing a non-flush bio.
1519 static int __split_and_process_non_flush(struct clone_info *ci)
1521 struct bio *bio = ci->bio;
1522 struct dm_target *ti;
1525 if (unlikely(bio->bi_rw & REQ_DISCARD))
1526 return __send_discard(ci);
1527 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1528 return __send_write_same(ci);
1530 ti = dm_table_find_target(ci->map, ci->sector);
1531 if (!dm_target_is_valid(ti))
1534 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1536 __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1539 ci->sector_count -= len;
1545 * Entry point to split a bio into clones and submit them to the targets.
1547 static void __split_and_process_bio(struct mapped_device *md,
1548 struct dm_table *map, struct bio *bio)
1550 struct clone_info ci;
1553 if (unlikely(!map)) {
1560 ci.io = alloc_io(md);
1562 atomic_set(&ci.io->io_count, 1);
1565 spin_lock_init(&ci.io->endio_lock);
1566 ci.sector = bio->bi_iter.bi_sector;
1568 start_io_acct(ci.io);
1570 if (bio->bi_rw & REQ_FLUSH) {
1571 ci.bio = &ci.md->flush_bio;
1572 ci.sector_count = 0;
1573 error = __send_empty_flush(&ci);
1574 /* dec_pending submits any data associated with flush */
1577 ci.sector_count = bio_sectors(bio);
1578 while (ci.sector_count && !error)
1579 error = __split_and_process_non_flush(&ci);
1582 /* drop the extra reference count */
1583 dec_pending(ci.io, error);
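/*
 * Note (added for clarity, not in the original): ci.io->io_count starts at 1
 * (the reference dropped by the final dec_pending() above); every clone that
 * __map_bio() dispatches takes an extra reference, so the original bio is
 * completed only after the last clone has finished.
 */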
1585 /*-----------------------------------------------------------------
1587 *---------------------------------------------------------------*/
1589 static int dm_merge_bvec(struct request_queue *q,
1590 struct bvec_merge_data *bvm,
1591 struct bio_vec *biovec)
1593 struct mapped_device *md = q->queuedata;
1594 struct dm_table *map = dm_get_live_table_fast(md);
1595 struct dm_target *ti;
1596 sector_t max_sectors;
1602 ti = dm_table_find_target(map, bvm->bi_sector);
1603 if (!dm_target_is_valid(ti))
1607 * Find maximum amount of I/O that won't need splitting
1609 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1610 (sector_t) queue_max_sectors(q));
1611 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1612 if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
1616 * merge_bvec_fn() returns number of bytes
1617 * it can accept at this offset
1618 * max is precomputed maximal io size
1620 if (max_size && ti->type->merge)
1621 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1623 * If the target doesn't support merge method and some of the devices
1624 * provided their merge_bvec method (we know this by looking for the
1625 * max_hw_sectors that dm_set_device_limits may set), then we can't
1626 * allow bios with multiple vector entries. So always set max_size
1627 * to 0, and the code below allows just one page.
1629 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1633 dm_put_live_table_fast(md);
1635 * Always allow an entire first page
1637 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1638 max_size = biovec->bv_len;
1644 * The request function that just remaps the bio built up by
1647 static void _dm_request(struct request_queue *q, struct bio *bio)
1649 int rw = bio_data_dir(bio);
1650 struct mapped_device *md = q->queuedata;
1652 struct dm_table *map;
1654 map = dm_get_live_table(md, &srcu_idx);
1656 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1658 /* if we're suspended, we have to queue this io for later */
1659 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1660 dm_put_live_table(md, srcu_idx);
1662 if (bio_rw(bio) != READA)
1669 __split_and_process_bio(md, map, bio);
1670 dm_put_live_table(md, srcu_idx);
1674 int dm_request_based(struct mapped_device *md)
1676 return blk_queue_stackable(md->queue);
1679 static void dm_request(struct request_queue *q, struct bio *bio)
1681 struct mapped_device *md = q->queuedata;
1683 if (dm_request_based(md))
1684 blk_queue_bio(q, bio);
1686 _dm_request(q, bio);
1689 void dm_dispatch_request(struct request *rq)
1693 if (blk_queue_io_stat(rq->q))
1694 rq->cmd_flags |= REQ_IO_STAT;
1696 rq->start_time = jiffies;
1697 r = blk_insert_cloned_request(rq->q, rq);
1699 dm_complete_request(rq, r);
1701 EXPORT_SYMBOL_GPL(dm_dispatch_request);
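/*
 * Note (added for clarity, not in the original): the clone passed in here was
 * built by clone_rq()/setup_clone() below; blk_insert_cloned_request() feeds
 * it to the underlying queue, and any insertion error is pushed back through
 * dm_complete_request() so the usual softirq completion path runs.
 */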
1703 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1706 struct dm_rq_target_io *tio = data;
1707 struct dm_rq_clone_bio_info *info =
1708 container_of(bio, struct dm_rq_clone_bio_info, clone);
1710 info->orig = bio_orig;
1712 bio->bi_end_io = end_clone_bio;
1717 static int setup_clone(struct request *clone, struct request *rq,
1718 struct dm_rq_target_io *tio)
1722 blk_rq_init(NULL, rq);
1723 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1724 dm_rq_bio_constructor, tio);
1728 clone->cmd = rq->cmd;
1729 clone->cmd_len = rq->cmd_len;
1730 clone->sense = rq->sense;
1731 clone->end_io = end_clone_request;
1732 clone->end_io_data = tio;
1737 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1740 struct request *clone;
1741 struct dm_rq_target_io *tio;
1743 tio = alloc_rq_tio(md, gfp_mask);
1751 memset(&tio->info, 0, sizeof(tio->info));
1753 clone = &tio->clone;
1754 if (setup_clone(clone, rq, tio)) {
1764 * Called with the queue lock held.
1766 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1768 struct mapped_device *md = q->queuedata;
1769 struct request *clone;
1771 if (unlikely(rq->special)) {
1772 DMWARN("Already has something in rq->special.");
1773 return BLKPREP_KILL;
1776 clone = clone_rq(rq, md, GFP_ATOMIC);
1778 return BLKPREP_DEFER;
1780 rq->special = clone;
1781 rq->cmd_flags |= REQ_DONTPREP;
1788 * 0 : the request has been processed (not requeued)
1789 * !0 : the request has been requeued
1791 static int map_request(struct dm_target *ti, struct request *clone,
1792 struct mapped_device *md)
1794 int r, requeued = 0;
1795 struct dm_rq_target_io *tio = clone->end_io_data;
1798 r = ti->type->map_rq(ti, clone, &tio->info);
1800 case DM_MAPIO_SUBMITTED:
1801 /* The target has taken the I/O to submit by itself later */
1803 case DM_MAPIO_REMAPPED:
1804 /* The target has remapped the I/O so dispatch it */
1805 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1806 blk_rq_pos(tio->orig));
1807 dm_dispatch_request(clone);
1809 case DM_MAPIO_REQUEUE:
1810 /* The target wants to requeue the I/O */
1811 dm_requeue_unmapped_request(clone);
1816 DMWARN("unimplemented target map return value: %d", r);
1820 /* The target wants to complete the I/O */
1821 dm_kill_unmapped_request(clone, r);
1828 static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1830 struct request *clone;
1832 blk_start_request(orig);
1833 clone = orig->special;
1834 atomic_inc(&md->pending[rq_data_dir(clone)]);
1837 * Hold the md reference here for the in-flight I/O.
1838 * We can't rely on the reference count by device opener,
1839 * because the device may be closed during the request completion
1840 * when all bios are completed.
1841 * See the comment in rq_completed() too.
1849 * q->request_fn for request-based dm.
1850 * Called with the queue lock held.
1852 static void dm_request_fn(struct request_queue *q)
1854 struct mapped_device *md = q->queuedata;
1856 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1857 struct dm_target *ti;
1858 struct request *rq, *clone;
1862 * For suspend, check blk_queue_stopped() and increment
1863 * ->pending within a single queue_lock not to increment the
1864 * number of in-flight I/Os after the queue is stopped in
1867 while (!blk_queue_stopped(q)) {
1868 rq = blk_peek_request(q);
1872 /* always use block 0 to find the target for flushes for now */
1874 if (!(rq->cmd_flags & REQ_FLUSH))
1875 pos = blk_rq_pos(rq);
1877 ti = dm_table_find_target(map, pos);
1878 if (!dm_target_is_valid(ti)) {
1880 * Must perform setup, that dm_done() requires,
1881 * before calling dm_kill_unmapped_request
1883 DMERR_LIMIT("request attempted access beyond the end of device");
1884 clone = dm_start_request(md, rq);
1885 dm_kill_unmapped_request(clone, -EIO);
1889 if (ti->type->busy && ti->type->busy(ti))
1892 clone = dm_start_request(md, rq);
1894 spin_unlock(q->queue_lock);
1895 if (map_request(ti, clone, md))
1898 BUG_ON(!irqs_disabled());
1899 spin_lock(q->queue_lock);
1905 BUG_ON(!irqs_disabled());
1906 spin_lock(q->queue_lock);
1909 blk_delay_queue(q, HZ / 10);
1911 dm_put_live_table(md, srcu_idx);
1914 int dm_underlying_device_busy(struct request_queue *q)
1916 return blk_lld_busy(q);
1918 EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1920 static int dm_lld_busy(struct request_queue *q)
1923 struct mapped_device *md = q->queuedata;
1924 struct dm_table *map = dm_get_live_table_fast(md);
1926 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1929 r = dm_table_any_busy_target(map);
1931 dm_put_live_table_fast(md);
1936 static int dm_any_congested(void *congested_data, int bdi_bits)
1939 struct mapped_device *md = congested_data;
1940 struct dm_table *map;
1942 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1943 map = dm_get_live_table_fast(md);
			/*
			 * Request-based dm only cares about its own queue when
			 * queried for the congestion status of the request_queue.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);
1955 dm_put_live_table_fast(md);
1961 /*-----------------------------------------------------------------
1962 * An IDR is used to keep track of allocated minor numbers.
1963 *---------------------------------------------------------------*/
1964 static void free_minor(int minor)
1966 spin_lock(&_minor_lock);
1967 idr_remove(&_minor_idr, minor);
1968 spin_unlock(&_minor_lock);
1972 * See if the device with a specific minor # is free.
1974 static int specific_minor(int minor)
1978 if (minor >= (1 << MINORBITS))
1981 idr_preload(GFP_KERNEL);
1982 spin_lock(&_minor_lock);
1984 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1986 spin_unlock(&_minor_lock);
1989 return r == -ENOSPC ? -EBUSY : r;
1993 static int next_free_minor(int *minor)
1997 idr_preload(GFP_KERNEL);
1998 spin_lock(&_minor_lock);
2000 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2002 spin_unlock(&_minor_lock);
2010 static const struct block_device_operations dm_blk_dops;
2012 static void dm_wq_work(struct work_struct *work);
2014 static void dm_init_md_queue(struct mapped_device *md)
2017 * Request-based dm devices cannot be stacked on top of bio-based dm
2018 * devices. The type of this dm device has not been decided yet.
2019 * The type is decided at the first table loading time.
2020 * To prevent problematic device stacking, clear the queue flag
2021 * for request stacking support until then.
2023 * This queue is new, so no concurrency on the queue_flags.
2025 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2027 md->queue->queuedata = md;
2028 md->queue->backing_dev_info.congested_fn = dm_any_congested;
2029 md->queue->backing_dev_info.congested_data = md;
2030 blk_queue_make_request(md->queue, dm_request);
2031 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
2032 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
2036 * Allocate and initialise a blank device with a given minor.
2038 static struct mapped_device *alloc_dev(int minor)
2041 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
2045 DMWARN("unable to allocate device, out of memory.");
2049 if (!try_module_get(THIS_MODULE))
2050 goto bad_module_get;
2052 /* get a minor number for the dev */
2053 if (minor == DM_ANY_MINOR)
2054 r = next_free_minor(&minor);
2056 r = specific_minor(minor);
2060 r = init_srcu_struct(&md->io_barrier);
2062 goto bad_io_barrier;
2064 md->type = DM_TYPE_NONE;
2065 mutex_init(&md->suspend_lock);
2066 mutex_init(&md->type_lock);
2067 mutex_init(&md->table_devices_lock);
2068 spin_lock_init(&md->deferred_lock);
2069 atomic_set(&md->holders, 1);
2070 atomic_set(&md->open_count, 0);
2071 atomic_set(&md->event_nr, 0);
2072 atomic_set(&md->uevent_seq, 0);
2073 INIT_LIST_HEAD(&md->uevent_list);
2074 INIT_LIST_HEAD(&md->table_devices);
2075 spin_lock_init(&md->uevent_lock);
2077 md->queue = blk_alloc_queue(GFP_KERNEL);
2081 dm_init_md_queue(md);
2083 md->disk = alloc_disk(1);
2087 atomic_set(&md->pending[0], 0);
2088 atomic_set(&md->pending[1], 0);
2089 init_waitqueue_head(&md->wait);
2090 INIT_WORK(&md->work, dm_wq_work);
2091 init_waitqueue_head(&md->eventq);
2092 init_completion(&md->kobj_holder.completion);
2094 md->disk->major = _major;
2095 md->disk->first_minor = minor;
2096 md->disk->fops = &dm_blk_dops;
2097 md->disk->queue = md->queue;
2098 md->disk->private_data = md;
2099 sprintf(md->disk->disk_name, "dm-%d", minor);
2101 format_dev_t(md->name, MKDEV(_major, minor));
2103 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2107 md->bdev = bdget_disk(md->disk, 0);
2111 bio_init(&md->flush_bio);
2112 md->flush_bio.bi_bdev = md->bdev;
2113 md->flush_bio.bi_rw = WRITE_FLUSH;
2115 dm_stats_init(&md->stats);
2117 /* Populate the mapping, nobody knows we exist yet */
2118 spin_lock(&_minor_lock);
2119 old_md = idr_replace(&_minor_idr, md, minor);
2120 spin_unlock(&_minor_lock);
2122 BUG_ON(old_md != MINOR_ALLOCED);
2127 destroy_workqueue(md->wq);
2129 del_gendisk(md->disk);
2132 blk_cleanup_queue(md->queue);
2134 cleanup_srcu_struct(&md->io_barrier);
2138 module_put(THIS_MODULE);
2144 static void unlock_fs(struct mapped_device *md);
2146 static void free_dev(struct mapped_device *md)
2148 int minor = MINOR(disk_devt(md->disk));
2152 destroy_workqueue(md->wq);
2154 mempool_destroy(md->io_pool);
2156 bioset_free(md->bs);
2157 blk_integrity_unregister(md->disk);
2158 del_gendisk(md->disk);
2159 cleanup_srcu_struct(&md->io_barrier);
2160 free_table_devices(&md->table_devices);
2163 spin_lock(&_minor_lock);
2164 md->disk->private_data = NULL;
2165 spin_unlock(&_minor_lock);
2168 blk_cleanup_queue(md->queue);
2169 dm_stats_cleanup(&md->stats);
2170 module_put(THIS_MODULE);
2174 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2176 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2178 if (md->io_pool && md->bs) {
2179 /* The md already has necessary mempools. */
2180 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2182 * Reload bioset because front_pad may have changed
2183 * because a different table was loaded.
2185 bioset_free(md->bs);
2188 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
2190 * There's no need to reload with request-based dm
2191 * because the size of front_pad doesn't change.
2192 * Note for future: If you are to reload bioset,
2193 * prep-ed requests in the queue may refer
2194 * to bio from the old bioset, so you must walk
2195 * through the queue to unprep.
2201 BUG_ON(!p || md->io_pool || md->bs);
2203 md->io_pool = p->io_pool;
	/* mempool bind completed, the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
2214 * Bind a table to the device.
2216 static void event_callback(void *context)
2218 unsigned long flags;
2220 struct mapped_device *md = (struct mapped_device *) context;
2222 spin_lock_irqsave(&md->uevent_lock, flags);
2223 list_splice_init(&md->uevent_list, &uevents);
2224 spin_unlock_irqrestore(&md->uevent_lock, flags);
2226 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2228 atomic_inc(&md->event_nr);
2229 wake_up(&md->eventq);
2233 * Protected by md->suspend_lock obtained by dm_swap_table().
2235 static void __set_size(struct mapped_device *md, sector_t size)
2237 set_capacity(md->disk, size);
2239 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2243 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2245 * If this function returns 0, then the device is either a non-dm
2246 * device without a merge_bvec_fn, or it is a dm device that is
2247 * able to split any bios it receives that are too big.
2249 int dm_queue_merge_is_compulsory(struct request_queue *q)
2251 struct mapped_device *dev_md;
2253 if (!q->merge_bvec_fn)
2256 if (q->make_request_fn == dm_request) {
2257 dev_md = q->queuedata;
2258 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2265 static int dm_device_merge_is_compulsory(struct dm_target *ti,
2266 struct dm_dev *dev, sector_t start,
2267 sector_t len, void *data)
2269 struct block_device *bdev = dev->bdev;
2270 struct request_queue *q = bdev_get_queue(bdev);
2272 return dm_queue_merge_is_compulsory(q);
2276 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2277 * on the properties of the underlying devices.
2279 static int dm_table_merge_is_optional(struct dm_table *table)
2282 struct dm_target *ti;
2284 while (i < dm_table_get_num_targets(table)) {
2285 ti = dm_table_get_target(table, i++);
2287 if (ti->type->iterate_devices &&
2288 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2296 * Returns old map, which caller must destroy.
2298 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2299 struct queue_limits *limits)
2301 struct dm_table *old_map;
2302 struct request_queue *q = md->queue;
2304 int merge_is_optional;
2306 size = dm_table_get_size(t);
2309 * Wipe any geometry if the size of the table changed.
2311 if (size != dm_get_size(md))
2312 memset(&md->geometry, 0, sizeof(md->geometry));
2314 __set_size(md, size);
2316 dm_table_event_callback(t, event_callback, md);
	/*
	 * If the old table type wasn't request-based, the queue wasn't
	 * stopped during suspension, so stop it now to prevent I/O from
	 * being mapped before the resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may start running right after they are set.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);
2328 __bind_mempools(md, t);
2330 merge_is_optional = dm_table_merge_is_optional(t);
2332 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2333 rcu_assign_pointer(md->map, t);
2334 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2336 dm_table_set_restrictions(t, q, limits);
2337 if (merge_is_optional)
2338 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2340 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2348 * Returns unbound table for the caller to free.
2350 static struct dm_table *__unbind(struct mapped_device *md)
2352 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2357 dm_table_event_callback(map, NULL, NULL);
2358 RCU_INIT_POINTER(md->map, NULL);
2365 * Constructor for a new device.
2367 int dm_create(int minor, struct mapped_device **result)
2369 struct mapped_device *md;
2371 md = alloc_dev(minor);
2382 * Functions to manage md->type.
2383 * All are required to hold md->type_lock.
2385 void dm_lock_md_type(struct mapped_device *md)
2387 mutex_lock(&md->type_lock);
2390 void dm_unlock_md_type(struct mapped_device *md)
2392 mutex_unlock(&md->type_lock);
2395 void dm_set_md_type(struct mapped_device *md, unsigned type)
2397 BUG_ON(!mutex_is_locked(&md->type_lock));
2401 unsigned dm_get_md_type(struct mapped_device *md)
2403 BUG_ON(!mutex_is_locked(&md->type_lock));
2407 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2409 return md->immutable_target_type;
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
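/*
 * Note (added for clarity, not in the original): targets use this to tweak
 * the live limits, e.g. clearing max_write_same_sectors as
 * disable_write_same() does above; the returned pointer is only safe to
 * dereference while the caller holds a reference on the mapped_device.
 */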
2424 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2426 static int dm_init_request_based_queue(struct mapped_device *md)
2428 struct request_queue *q = NULL;
2430 if (md->queue->elevator)
2433 /* Fully initialize the queue */
2434 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2439 dm_init_md_queue(md);
2440 blk_queue_softirq_done(md->queue, dm_softirq_done);
2441 blk_queue_prep_rq(md->queue, dm_prep_fn);
2442 blk_queue_lld_busy(md->queue, dm_lld_busy);
2444 elv_register_queue(md->queue);
2450 * Setup the DM device's queue based on md's type
2452 int dm_setup_md_queue(struct mapped_device *md)
2454 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2455 !dm_init_request_based_queue(md)) {
2456 DMWARN("Cannot initialize queue for request-based mapped device");
2463 static struct mapped_device *dm_find_md(dev_t dev)
2465 struct mapped_device *md;
2466 unsigned minor = MINOR(dev);
2468 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2471 spin_lock(&_minor_lock);
2473 md = idr_find(&_minor_idr, minor);
2474 if (md && (md == MINOR_ALLOCED ||
2475 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2476 dm_deleting_md(md) ||
2477 test_bit(DMF_FREEING, &md->flags))) {
2483 spin_unlock(&_minor_lock);
2488 struct mapped_device *dm_get_md(dev_t dev)
2490 struct mapped_device *md = dm_find_md(dev);
2497 EXPORT_SYMBOL_GPL(dm_get_md);
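/*
 * Note (added for clarity, not in the original): dm_find_md() looks the minor
 * up in _minor_idr under _minor_lock and refuses devices that are DMF_FREEING
 * or deleting, so a successful dm_get_md() is expected to hand back a
 * mapped_device on which a holder reference has been taken via dm_get().
 */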
2499 void *dm_get_mdptr(struct mapped_device *md)
2501 return md->interface_ptr;
2504 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2506 md->interface_ptr = ptr;
2509 void dm_get(struct mapped_device *md)
2511 atomic_inc(&md->holders);
2512 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2515 const char *dm_device_name(struct mapped_device *md)
2519 EXPORT_SYMBOL_GPL(dm_device_name);
2521 static void __dm_destroy(struct mapped_device *md, bool wait)
2523 struct dm_table *map;
2528 spin_lock(&_minor_lock);
2529 map = dm_get_live_table(md, &srcu_idx);
2530 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2531 set_bit(DMF_FREEING, &md->flags);
2532 spin_unlock(&_minor_lock);
2534 if (!dm_suspended_md(md)) {
2535 dm_table_presuspend_targets(map);
2536 dm_table_postsuspend_targets(map);
2539 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2540 dm_put_live_table(md, srcu_idx);
2543 * Rare, but there may be I/O requests still going to complete,
2544 * for example. Wait for all references to disappear.
2545 * No one should increment the reference count of the mapped_device,
2546 * after the mapped_device state becomes DMF_FREEING.
2549 while (atomic_read(&md->holders))
2551 else if (atomic_read(&md->holders))
2552 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2553 dm_device_name(md), atomic_read(&md->holders));
2556 dm_table_destroy(__unbind(md));
2560 void dm_destroy(struct mapped_device *md)
2562 __dm_destroy(md, true);
2565 void dm_destroy_immediate(struct mapped_device *md)
2567 __dm_destroy(md, false);
2570 void dm_put(struct mapped_device *md)
2572 atomic_dec(&md->holders);
2574 EXPORT_SYMBOL_GPL(dm_put);
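/*
 * Note (added for clarity, not in the original): dm_put() only drops the
 * holder count; the actual teardown is driven by __dm_destroy(), which sets
 * DMF_FREEING and then waits (or warns) until all holders are gone before
 * destroying the table and freeing the device.
 */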
2576 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2579 DECLARE_WAITQUEUE(wait, current);
2581 add_wait_queue(&md->wait, &wait);
2584 set_current_state(interruptible);
2586 if (!md_in_flight(md))
2589 if (interruptible == TASK_INTERRUPTIBLE &&
2590 signal_pending(current)) {
2597 set_current_state(TASK_RUNNING);
2599 remove_wait_queue(&md->wait, &wait);
2605 * Process the deferred bios
2607 static void dm_wq_work(struct work_struct *work)
2609 struct mapped_device *md = container_of(work, struct mapped_device,
2613 struct dm_table *map;
2615 map = dm_get_live_table(md, &srcu_idx);
2617 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2618 spin_lock_irq(&md->deferred_lock);
2619 c = bio_list_pop(&md->deferred);
2620 spin_unlock_irq(&md->deferred_lock);
2625 if (dm_request_based(md))
2626 generic_make_request(c);
2628 __split_and_process_bio(md, map, c);
2631 dm_put_live_table(md, srcu_idx);
2634 static void dm_queue_flush(struct mapped_device *md)
2636 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2637 smp_mb__after_atomic();
2638 queue_work(md->wq, &md->work);
2642 * Swap in a new table, returning the old one for the caller to destroy.
2644 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2646 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2647 struct queue_limits limits;
2650 mutex_lock(&md->suspend_lock);
2652 /* device must be suspended */
2653 if (!dm_suspended_md(md))
	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
2663 live_map = dm_get_live_table_fast(md);
2665 limits = md->queue->limits;
2666 dm_put_live_table_fast(md);
2670 r = dm_calculate_queue_limits(table, &limits);
2677 map = __bind(md, table, &limits);
2680 mutex_unlock(&md->suspend_lock);
2685 * Functions to lock and unlock any filesystem running on the
2688 static int lock_fs(struct mapped_device *md)
2692 WARN_ON(md->frozen_sb);
2694 md->frozen_sb = freeze_bdev(md->bdev);
2695 if (IS_ERR(md->frozen_sb)) {
2696 r = PTR_ERR(md->frozen_sb);
2697 md->frozen_sb = NULL;
2701 set_bit(DMF_FROZEN, &md->flags);
2706 static void unlock_fs(struct mapped_device *md)
2708 if (!test_bit(DMF_FROZEN, &md->flags))
2711 thaw_bdev(md->bdev, md->frozen_sb);
2712 md->frozen_sb = NULL;
2713 clear_bit(DMF_FROZEN, &md->flags);
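/*
 * lock_fs()/unlock_fs() are thin wrappers around the block layer's
 * freeze protocol: freeze_bdev() returns the frozen super_block (NULL if
 * nothing is mounted, ERR_PTR on failure) and thaw_bdev() needs that
 * same super_block back, hence md->frozen_sb and the DMF_FROZEN flag.
 * The bare pattern looks roughly like:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	// filesystem on bdev is now synced and quiesced
 *	thaw_bdev(bdev, sb);
 */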
2717 * If __dm_suspend returns 0, the device is completely quiescent
2718 * now. There is no request-processing activity. All new requests
2719 * are being added to the md->deferred list.
2721 * Caller must hold md->suspend_lock
2723 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2724 unsigned suspend_flags, int interruptible)
2726 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2727 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2731 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2732 * This flag is cleared before dm_suspend returns.
2735 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2738 * This gets reverted if there's an error later and the targets
2739 * provide the .presuspend_undo hook.
2741 dm_table_presuspend_targets(map);
2744 * Flush I/O to the device.
2745 * Any I/O submitted after lock_fs() may not be flushed.
2746 * noflush takes precedence over do_lockfs.
2747 * (lock_fs() flushes I/Os and waits for them to complete.)
2749 if (!noflush && do_lockfs) {
2750 r = lock_fs(md);
2751 if (r) {
2752 dm_table_presuspend_undo_targets(map);
2758 * Here we must make sure that no processes are submitting requests
2759 * to target drivers i.e. no one may be executing
2760 * __split_and_process_bio. This is called from dm_request and
2761 * dm_wq_work.
2763 * To get all processes out of __split_and_process_bio in dm_request,
2764 * we take the write lock. To prevent any process from reentering
2765 * __split_and_process_bio from dm_request and quiesce the thread
2766 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2767 * flush_workqueue(md->wq).
2769 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2771 synchronize_srcu(&md->io_barrier);
2774 * Stop md->queue before flushing md->wq in case request-based
2775 * dm defers requests to md->wq from md->queue.
2777 if (dm_request_based(md))
2778 stop_queue(md->queue);
2780 flush_workqueue(md->wq);
2783 * At this point no more requests are entering target request routines.
2784 * We call dm_wait_for_completion to wait for all existing requests
2785 * to finish.
2787 r = dm_wait_for_completion(md, interruptible);
2790 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2792 synchronize_srcu(&md->io_barrier);
2794 /* were we interrupted ? */
2795 if (r < 0) {
2796 dm_queue_flush(md);
2798 if (dm_request_based(md))
2799 start_queue(md->queue);
2801 unlock_fs(md);
2802 dm_table_presuspend_undo_targets(map);
2803 /* pushback list is already flushed, so skip flush */
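/*
 * Summarising the bio-based half of the quiescing above: set
 * DMF_BLOCK_IO_FOR_SUSPEND so new bios are parked on md->deferred,
 * synchronize_srcu(&md->io_barrier) to wait out submitters already
 * inside __split_and_process_bio, flush_workqueue(md->wq) to park
 * dm_wq_work, then dm_wait_for_completion() for bios that were already
 * mapped to the targets.
 */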
2810 * We need to be able to change a mapping table under a mounted
2811 * filesystem. For example we might want to move some data in
2812 * the background. Before the table can be swapped with
2813 * dm_bind_table, dm_suspend must be called to flush any in-flight
2814 * bios and ensure that any further I/O gets deferred.
2817 * Suspend mechanism in request-based dm.
2819 * 1. Flush all I/Os by lock_fs() if needed.
2820 * 2. Stop dispatching any I/O by stopping the request_queue.
2821 * 3. Wait for all in-flight I/Os to be completed or requeued.
2823 * To abort suspend, start the request_queue.
2825 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2827 struct dm_table *map = NULL;
2831 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2833 if (dm_suspended_md(md)) {
2838 if (dm_suspended_internally_md(md)) {
2839 /* already internally suspended, wait for internal resume */
2840 mutex_unlock(&md->suspend_lock);
2841 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2847 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2849 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
2853 set_bit(DMF_SUSPENDED, &md->flags);
2855 dm_table_postsuspend_targets(map);
2858 mutex_unlock(&md->suspend_lock);
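/*
 * The suspend_flags accepted above select between two quiescing
 * policies (DM_SUSPEND_LOCKFS_FLAG and DM_SUSPEND_NOFLUSH_FLAG are
 * defined in dm.h); a caller chooses roughly like this (sketch):
 *
 *	// flush outstanding I/O and freeze the filesystem first:
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *
 *	// don't flush: targets that check dm_noflush_suspending() push
 *	// back in-flight I/O to be reissued after the next resume:
 *	r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
 *
 * As noted in __dm_suspend(), noflush takes precedence if both are set.
 */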
2862 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2865 int r = dm_table_resume_targets(map);
2873 * Flushing deferred I/Os must be done after targets are resumed
2874 * so that mapping of targets can work correctly.
2875 * Request-based dm queues the deferred I/Os in its request_queue.
2877 if (dm_request_based(md))
2878 start_queue(md->queue);
2885 int dm_resume(struct mapped_device *md)
2888 struct dm_table *map = NULL;
2891 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2893 if (!dm_suspended_md(md))
2896 if (dm_suspended_internally_md(md)) {
2897 /* already internally suspended, wait for internal resume */
2898 mutex_unlock(&md->suspend_lock);
2899 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2905 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2906 if (!map || !dm_table_get_size(map))
2909 r = __dm_resume(md, map);
2913 clear_bit(DMF_SUSPENDED, &md->flags);
2917 mutex_unlock(&md->suspend_lock);
2923 * Internal suspend/resume works like userspace-driven suspend. It waits
2924 * until all bios finish and prevents issuing new bios to the target drivers.
2925 * It may be used only from the kernel.
2928 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2930 struct dm_table *map = NULL;
2932 if (dm_suspended_internally_md(md))
2933 return; /* nested internal suspend */
2935 if (dm_suspended_md(md)) {
2936 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2937 return; /* nest suspend */
2940 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2943 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2944 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2945 * would require changing .presuspend to return an error -- avoid this
2946 * until there is a need for more elaborate variants of internal suspend.
2948 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
2950 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2952 dm_table_postsuspend_targets(map);
2955 static void __dm_internal_resume(struct mapped_device *md)
2957 if (!dm_suspended_internally_md(md))
2958 return; /* resume from nested internal suspend */
2960 if (dm_suspended_md(md))
2961 goto done; /* resume from nested suspend */
2964 * NOTE: existing callers don't need to call dm_table_resume_targets
2965 * (which may fail -- so best to avoid it for now by passing NULL map)
2967 (void) __dm_resume(md, NULL);
2970 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2971 smp_mb__after_atomic();
2972 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2975 void dm_internal_suspend_noflush(struct mapped_device *md)
2977 mutex_lock(&md->suspend_lock);
2978 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2979 mutex_unlock(&md->suspend_lock);
2981 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2983 void dm_internal_resume(struct mapped_device *md)
2985 mutex_lock(&md->suspend_lock);
2986 __dm_internal_resume(md);
2987 mutex_unlock(&md->suspend_lock);
2989 EXPORT_SYMBOL_GPL(dm_internal_resume);
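/*
 * In-kernel users pair the internal suspend/resume calls as a simple
 * bracket around work that must not race with I/O (sketch):
 *
 *	dm_internal_suspend_noflush(md);
 *	// no new bios reach the targets; outstanding ones have completed
 *	// or been pushed back by noflush-aware targets
 *	dm_internal_resume(md);
 *
 * The calls nest with userspace suspend: if the device is already
 * suspended only DMF_SUSPENDED_INTERNALLY is toggled, and dm_suspend()/
 * dm_resume() wait on that bit, as seen above.
 */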
2992 * Fast variants of internal suspend/resume hold md->suspend_lock,
2993 * which prevents interaction with userspace-driven suspend.
2996 void dm_internal_suspend_fast(struct mapped_device *md)
2998 mutex_lock(&md->suspend_lock);
2999 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3002 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3003 synchronize_srcu(&md->io_barrier);
3004 flush_workqueue(md->wq);
3005 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3008 void dm_internal_resume_fast(struct mapped_device *md)
3010 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3016 mutex_unlock(&md->suspend_lock);
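/*
 * The _fast variants keep md->suspend_lock held from suspend until
 * resume, so they must be used as a strictly bracketed pair on the same
 * call path (sketch):
 *
 *	dm_internal_suspend_fast(md);	// takes suspend_lock, quiesces I/O
 *	// short critical section
 *	dm_internal_resume_fast(md);	// restarts I/O, drops suspend_lock
 *
 * Holding the lock across the window is what keeps a concurrent
 * userspace-driven suspend or resume from interleaving with it.
 */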
3019 /*-----------------------------------------------------------------
3020 * Event notification.
3021 *---------------------------------------------------------------*/
3022 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3023 unsigned cookie)
3025 char udev_cookie[DM_COOKIE_LENGTH];
3026 char *envp[] = { udev_cookie, NULL };
3028 if (!cookie)
3029 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
3031 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3032 DM_COOKIE_ENV_VAR_NAME, cookie);
3033 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
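/*
 * With a non-zero cookie the uevent carries one extra environment
 * variable built by the snprintf() above; e.g. a cookie of 54481 would
 * yield (illustrative value only):
 *
 *	DM_COOKIE=54481
 *
 * The device-mapper udev rules and libdevmapper use this value to match
 * the CHANGE/REMOVE event to the ioctl that caused it and to signal
 * completion back to the waiting caller.
 */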
3038 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3040 return atomic_add_return(1, &md->uevent_seq);
3043 uint32_t dm_get_event_nr(struct mapped_device *md)
3045 return atomic_read(&md->event_nr);
3048 int dm_wait_event(struct mapped_device *md, int event_nr)
3050 return wait_event_interruptible(md->eventq,
3051 (event_nr != atomic_read(&md->event_nr)));
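/*
 * The event counter and eventq let userspace block until "something
 * happened" on the device. The intended idiom is sample-then-wait
 * (sketch):
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	// ... report current status to the caller ...
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;	// interrupted by a signal
 *	// event_nr moved on: status may have changed, go round again
 *
 * Sampling the counter before acting ensures an event that fires in
 * between is not missed by the subsequent wait.
 */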
3054 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3056 unsigned long flags;
3058 spin_lock_irqsave(&md->uevent_lock, flags);
3059 list_add(elist, &md->uevent_list);
3060 spin_unlock_irqrestore(&md->uevent_lock, flags);
3064 * The gendisk is only valid as long as you have a reference
3065 * count on 'md'.
3067 struct gendisk *dm_disk(struct mapped_device *md)
3069 return md->disk;
3072 struct kobject *dm_kobject(struct mapped_device *md)
3074 return &md->kobj_holder.kobj;
3077 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3079 struct mapped_device *md;
3081 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3083 if (test_bit(DMF_FREEING, &md->flags) ||
3091 int dm_suspended_md(struct mapped_device *md)
3093 return test_bit(DMF_SUSPENDED, &md->flags);
3096 int dm_suspended_internally_md(struct mapped_device *md)
3098 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3101 int dm_test_deferred_remove_flag(struct mapped_device *md)
3103 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3106 int dm_suspended(struct dm_target *ti)
3108 return dm_suspended_md(dm_table_get_md(ti->table));
3110 EXPORT_SYMBOL_GPL(dm_suspended);
3112 int dm_noflush_suspending(struct dm_target *ti)
3114 return __noflush_suspending(dm_table_get_md(ti->table));
3116 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3118 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
3120 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3121 struct kmem_cache *cachep;
3122 unsigned int pool_size;
3123 unsigned int front_pad;
3128 if (type == DM_TYPE_BIO_BASED) {
3130 pool_size = dm_get_reserved_bio_based_ios();
3131 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3132 } else if (type == DM_TYPE_REQUEST_BASED) {
3133 cachep = _rq_tio_cache;
3134 pool_size = dm_get_reserved_rq_based_ios();
3135 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3136 /* per_bio_data_size is not used. See __bind_mempools(). */
3137 WARN_ON(per_bio_data_size != 0);
3141 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3142 if (!pools->io_pool)
3145 pools->bs = bioset_create_nobvec(pool_size, front_pad);
3149 if (integrity && bioset_integrity_create(pools->bs, pool_size))
3155 dm_free_md_mempools(pools);
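/*
 * The front_pad values computed above control what bio_alloc_bioset()
 * lays out in front of each struct bio, so per-bio bookkeeping comes
 * from the same allocation.  Roughly:
 *
 *	bio-based:
 *	    front_pad = roundup(per_bio_data_size,
 *				__alignof__(struct dm_target_io))
 *			+ offsetof(struct dm_target_io, clone);
 *	    [ per-bio data ][ struct dm_target_io ... | clone: struct bio ]
 *	                                                ^ bio handed out
 *
 *	request-based:
 *	    front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
 *	    [ struct dm_rq_clone_bio_info ... | clone: struct bio ]
 */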
3160 void dm_free_md_mempools(struct dm_md_mempools *pools)
3166 mempool_destroy(pools->io_pool);
3169 bioset_free(pools->bs);
3174 static const struct block_device_operations dm_blk_dops = {
3175 .open = dm_blk_open,
3176 .release = dm_blk_close,
3177 .ioctl = dm_blk_ioctl,
3178 .getgeo = dm_blk_getgeo,
3179 .owner = THIS_MODULE
3185 module_init(dm_init);
3186 module_exit(dm_exit);
3188 module_param(major, uint, 0);
3189 MODULE_PARM_DESC(major, "The major number of the device mapper");
3191 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3192 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3194 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3195 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3197 MODULE_DESCRIPTION(DM_NAME " driver");
3198 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3199 MODULE_LICENSE("GPL");