dm: fix handling of multiple internal suspends
[linux-2.6-block.git] / drivers / md / dm.c
1da177e4
LT
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
784aae73 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
1da177e4
LT
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"
51e5b2bd 9#include "dm-uevent.h"
1da177e4
LT
10
11#include <linux/init.h>
12#include <linux/module.h>
48c9c27b 13#include <linux/mutex.h>
1da177e4
LT
14#include <linux/moduleparam.h>
15#include <linux/blkpg.h>
16#include <linux/bio.h>
1da177e4
LT
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/idr.h>
3ac51e74 20#include <linux/hdreg.h>
3f77316d 21#include <linux/delay.h>
ffcc3936 22#include <linux/wait.h>
55782138
LZ
23
24#include <trace/events/block.h>
1da177e4 25
72d94861
AK
26#define DM_MSG_PREFIX "core"
27
71a16736
NK
28#ifdef CONFIG_PRINTK
29/*
30 * ratelimit state to be used in DMXXX_LIMIT().
31 */
32DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
33 DEFAULT_RATELIMIT_INTERVAL,
34 DEFAULT_RATELIMIT_BURST);
35EXPORT_SYMBOL(dm_ratelimit_state);
36#endif
37
60935eb2
MB
38/*
39 * Cookies are numeric values sent with CHANGE and REMOVE
40 * uevents while resuming, removing or renaming the device.
41 */
42#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
43#define DM_COOKIE_LENGTH 24
44
1da177e4
LT
45static const char *_name = DM_NAME;
46
47static unsigned int major = 0;
48static unsigned int _major = 0;
49
d15b774c
AK
50static DEFINE_IDR(_minor_idr);
51
f32c10b0 52static DEFINE_SPINLOCK(_minor_lock);
2c140a24
MP
53
54static void do_deferred_remove(struct work_struct *w);
55
56static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
57
acfe0ad7
MP
58static struct workqueue_struct *deferred_remove_workqueue;
59
1da177e4 60/*
8fbf26ad 61 * For bio-based dm.
1da177e4
LT
62 * One of these is allocated per bio.
63 */
64struct dm_io {
65 struct mapped_device *md;
66 int error;
1da177e4 67 atomic_t io_count;
6ae2fa67 68 struct bio *bio;
3eaf840e 69 unsigned long start_time;
f88fb981 70 spinlock_t endio_lock;
fd2ed4d2 71 struct dm_stats_aux stats_aux;
1da177e4
LT
72};
73
8fbf26ad
KU
74/*
75 * For request-based dm.
76 * One of these is allocated per request.
77 */
78struct dm_rq_target_io {
79 struct mapped_device *md;
80 struct dm_target *ti;
81 struct request *orig, clone;
82 int error;
83 union map_info info;
84};
85
86/*
94818742
KO
87 * For request-based dm - the bio clones we allocate are embedded in these
88 * structs.
89 *
90 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
91 * the bioset is created - this means the bio has to come at the end of the
92 * struct.
8fbf26ad
KU
93 */
94struct dm_rq_clone_bio_info {
95 struct bio *orig;
cec47e3d 96 struct dm_rq_target_io *tio;
94818742 97 struct bio clone;
8fbf26ad
KU
98};
99
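/*
 * Illustrative sketch (not part of the original file): the front_pad trick
 * described in the comment above.  A bioset created with front_pad equal to
 * the offset of the embedded bio lets us recover the enclosing
 * dm_rq_clone_bio_info from any bio allocated out of that bioset.  The pool
 * size and function names below are invented for the example.
 */
static struct dm_rq_clone_bio_info *example_info_from_bio(struct bio *bio)
{
	return container_of(bio, struct dm_rq_clone_bio_info, clone);
}

static struct bio_set *example_create_clone_bioset(void)
{
	/* hypothetical pool size; front_pad places 'clone' at the end of the struct */
	return bioset_create(256, offsetof(struct dm_rq_clone_bio_info, clone));
}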
cec47e3d
KU
100union map_info *dm_get_rq_mapinfo(struct request *rq)
101{
102 if (rq && rq->end_io_data)
103 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
104 return NULL;
105}
106EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
107
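/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * request-based target could use dm_get_rq_mapinfo() to get back at the
 * per-request map_info it filled in from its map_rq hook.  The structure and
 * function names below are invented for the example.
 */
struct example_per_rq_data {
	int retries;
};

static void example_inspect_request(struct request *rq)
{
	union map_info *info = dm_get_rq_mapinfo(rq);
	struct example_per_rq_data *data;

	if (!info)
		return;			/* not a request cloned by dm */

	data = info->ptr;		/* assumed stored by the target's map_rq() */
	if (data)
		data->retries++;
}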
ba61fdd1
JM
108#define MINOR_ALLOCED ((void *)-1)
109
1da177e4
LT
110/*
111 * Bits for the md->flags field.
112 */
1eb787ec 113#define DMF_BLOCK_IO_FOR_SUSPEND 0
1da177e4 114#define DMF_SUSPENDED 1
aa8d7c2f 115#define DMF_FROZEN 2
fba9f90e 116#define DMF_FREEING 3
5c6bd75d 117#define DMF_DELETING 4
2e93ccc1 118#define DMF_NOFLUSH_SUSPENDING 5
d5b9dd04 119#define DMF_MERGE_IS_OPTIONAL 6
2c140a24 120#define DMF_DEFERRED_REMOVE 7
ffcc3936 121#define DMF_SUSPENDED_INTERNALLY 8
1da177e4 122
83d5e5b0
MP
123/*
124 * A dummy definition to make RCU happy.
125 * struct dm_table should never be dereferenced in this file.
126 */
127struct dm_table {
128 int undefined__;
129};
130
304f3f6a
MB
131/*
132 * Work processed by per-device workqueue.
133 */
1da177e4 134struct mapped_device {
83d5e5b0 135 struct srcu_struct io_barrier;
e61290a4 136 struct mutex suspend_lock;
1da177e4 137 atomic_t holders;
5c6bd75d 138 atomic_t open_count;
1da177e4 139
2a7faeb1
MP
140 /*
141 * The current mapping.
142 * Use dm_get_live_table{_fast} or take suspend_lock for
143 * dereference.
144 */
6fa99520 145 struct dm_table __rcu *map;
2a7faeb1 146
86f1152b
BM
147 struct list_head table_devices;
148 struct mutex table_devices_lock;
149
1da177e4
LT
150 unsigned long flags;
151
165125e1 152 struct request_queue *queue;
a5664dad 153 unsigned type;
4a0b4ddf 154 /* Protect queue and type against concurrent access. */
a5664dad
MS
155 struct mutex type_lock;
156
36a0456f
AK
157 struct target_type *immutable_target_type;
158
1da177e4 159 struct gendisk *disk;
7e51f257 160 char name[16];
1da177e4
LT
161
162 void *interface_ptr;
163
164 /*
165 * A list of ios that arrived while we were suspended.
166 */
316d315b 167 atomic_t pending[2];
1da177e4 168 wait_queue_head_t wait;
53d5914f 169 struct work_struct work;
74859364 170 struct bio_list deferred;
022c2611 171 spinlock_t deferred_lock;
1da177e4 172
af7e466a 173 /*
29e4013d 174 * Processing queue (flush)
304f3f6a
MB
175 */
176 struct workqueue_struct *wq;
177
1da177e4
LT
178 /*
179 * io objects are allocated from here.
180 */
181 mempool_t *io_pool;
1da177e4 182
9faf400f
SB
183 struct bio_set *bs;
184
1da177e4
LT
185 /*
186 * Event handling.
187 */
188 atomic_t event_nr;
189 wait_queue_head_t eventq;
7a8c3d3b
MA
190 atomic_t uevent_seq;
191 struct list_head uevent_list;
192 spinlock_t uevent_lock; /* Protect access to uevent_list */
1da177e4
LT
193
194 /*
195 * freeze/thaw support require holding onto a super block
196 */
197 struct super_block *frozen_sb;
db8fef4f 198 struct block_device *bdev;
3ac51e74
DW
199
200 /* forced geometry settings */
201 struct hd_geometry geometry;
784aae73 202
2995fa78
MP
203 /* kobject and completion */
204 struct dm_kobject_holder kobj_holder;
be35f486 205
d87f4c14
TH
206 /* zero-length flush that will be cloned and submitted to targets */
207 struct bio flush_bio;
fd2ed4d2 208
96b26c8c
MP
209 /* the number of internal suspends */
210 unsigned internal_suspend_count;
211
fd2ed4d2 212 struct dm_stats stats;
1da177e4
LT
213};
214
e6ee8c0b
KU
215/*
216 * For mempools pre-allocation at the table loading time.
217 */
218struct dm_md_mempools {
219 mempool_t *io_pool;
e6ee8c0b
KU
220 struct bio_set *bs;
221};
222
86f1152b
BM
223struct table_device {
224 struct list_head list;
225 atomic_t count;
226 struct dm_dev dm_dev;
227};
228
6cfa5857
MS
229#define RESERVED_BIO_BASED_IOS 16
230#define RESERVED_REQUEST_BASED_IOS 256
f4790826 231#define RESERVED_MAX_IOS 1024
e18b890b 232static struct kmem_cache *_io_cache;
8fbf26ad 233static struct kmem_cache *_rq_tio_cache;
94818742 234
e8603136
MS
235/*
236 * Bio-based DM's mempools' reserved IOs set by the user.
237 */
238static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
239
f4790826
MS
240/*
241 * Request-based DM's mempools' reserved IOs set by the user.
242 */
243static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
244
245static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
246 unsigned def, unsigned max)
247{
248 unsigned ios = ACCESS_ONCE(*reserved_ios);
249 unsigned modified_ios = 0;
250
251 if (!ios)
252 modified_ios = def;
253 else if (ios > max)
254 modified_ios = max;
255
256 if (modified_ios) {
257 (void)cmpxchg(reserved_ios, ios, modified_ios);
258 ios = modified_ios;
259 }
260
261 return ios;
262}
263
e8603136
MS
264unsigned dm_get_reserved_bio_based_ios(void)
265{
266 return __dm_get_reserved_ios(&reserved_bio_based_ios,
267 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
268}
269EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
270
f4790826
MS
271unsigned dm_get_reserved_rq_based_ios(void)
272{
273 return __dm_get_reserved_ios(&reserved_rq_based_ios,
274 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
275}
276EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
277
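/*
 * Illustrative sketch (not part of the original file): the clamping done by
 * __dm_get_reserved_ios() above.  If the module parameter was set to 0 or to
 * something above RESERVED_MAX_IOS, the getter returns the default/maximum
 * and writes the corrected value back with cmpxchg() so later readers agree.
 * The function below is hypothetical and only exercises the two getters.
 */
static void example_reserved_ios_clamping(void)
{
	reserved_bio_based_ios = 0;			/* cleared by the user */
	WARN_ON(dm_get_reserved_bio_based_ios() != RESERVED_BIO_BASED_IOS);

	reserved_rq_based_ios = 10 * RESERVED_MAX_IOS;	/* set too high by the user */
	WARN_ON(dm_get_reserved_rq_based_ios() != RESERVED_MAX_IOS);
}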
1da177e4
LT
278static int __init local_init(void)
279{
51157b4a 280 int r = -ENOMEM;
1da177e4 281
1da177e4 282 /* allocate a slab for the dm_ios */
028867ac 283 _io_cache = KMEM_CACHE(dm_io, 0);
1da177e4 284 if (!_io_cache)
51157b4a 285 return r;
1da177e4 286
8fbf26ad
KU
287 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
288 if (!_rq_tio_cache)
dba14160 289 goto out_free_io_cache;
8fbf26ad 290
51e5b2bd 291 r = dm_uevent_init();
51157b4a 292 if (r)
23e5083b 293 goto out_free_rq_tio_cache;
51e5b2bd 294
acfe0ad7
MP
295 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
296 if (!deferred_remove_workqueue) {
297 r = -ENOMEM;
298 goto out_uevent_exit;
299 }
300
1da177e4
LT
301 _major = major;
302 r = register_blkdev(_major, _name);
51157b4a 303 if (r < 0)
acfe0ad7 304 goto out_free_workqueue;
1da177e4
LT
305
306 if (!_major)
307 _major = r;
308
309 return 0;
51157b4a 310
acfe0ad7
MP
311out_free_workqueue:
312 destroy_workqueue(deferred_remove_workqueue);
51157b4a
KU
313out_uevent_exit:
314 dm_uevent_exit();
8fbf26ad
KU
315out_free_rq_tio_cache:
316 kmem_cache_destroy(_rq_tio_cache);
51157b4a
KU
317out_free_io_cache:
318 kmem_cache_destroy(_io_cache);
319
320 return r;
1da177e4
LT
321}
322
323static void local_exit(void)
324{
2c140a24 325 flush_scheduled_work();
acfe0ad7 326 destroy_workqueue(deferred_remove_workqueue);
2c140a24 327
8fbf26ad 328 kmem_cache_destroy(_rq_tio_cache);
1da177e4 329 kmem_cache_destroy(_io_cache);
00d59405 330 unregister_blkdev(_major, _name);
51e5b2bd 331 dm_uevent_exit();
1da177e4
LT
332
333 _major = 0;
334
335 DMINFO("cleaned up");
336}
337
b9249e55 338static int (*_inits[])(void) __initdata = {
1da177e4
LT
339 local_init,
340 dm_target_init,
341 dm_linear_init,
342 dm_stripe_init,
952b3557 343 dm_io_init,
945fa4d2 344 dm_kcopyd_init,
1da177e4 345 dm_interface_init,
fd2ed4d2 346 dm_statistics_init,
1da177e4
LT
347};
348
b9249e55 349static void (*_exits[])(void) = {
1da177e4
LT
350 local_exit,
351 dm_target_exit,
352 dm_linear_exit,
353 dm_stripe_exit,
952b3557 354 dm_io_exit,
945fa4d2 355 dm_kcopyd_exit,
1da177e4 356 dm_interface_exit,
fd2ed4d2 357 dm_statistics_exit,
1da177e4
LT
358};
359
360static int __init dm_init(void)
361{
362 const int count = ARRAY_SIZE(_inits);
363
364 int r, i;
365
366 for (i = 0; i < count; i++) {
367 r = _inits[i]();
368 if (r)
369 goto bad;
370 }
371
372 return 0;
373
374 bad:
375 while (i--)
376 _exits[i]();
377
378 return r;
379}
380
381static void __exit dm_exit(void)
382{
383 int i = ARRAY_SIZE(_exits);
384
385 while (i--)
386 _exits[i]();
d15b774c
AK
387
388 /*
389 * Should be empty by this point.
390 */
d15b774c 391 idr_destroy(&_minor_idr);
1da177e4
LT
392}
393
394/*
395 * Block device functions
396 */
432a212c
MA
397int dm_deleting_md(struct mapped_device *md)
398{
399 return test_bit(DMF_DELETING, &md->flags);
400}
401
fe5f9f2c 402static int dm_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4
LT
403{
404 struct mapped_device *md;
405
fba9f90e
JM
406 spin_lock(&_minor_lock);
407
fe5f9f2c 408 md = bdev->bd_disk->private_data;
fba9f90e
JM
409 if (!md)
410 goto out;
411
5c6bd75d 412 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 413 dm_deleting_md(md)) {
fba9f90e
JM
414 md = NULL;
415 goto out;
416 }
417
1da177e4 418 dm_get(md);
5c6bd75d 419 atomic_inc(&md->open_count);
fba9f90e
JM
420
421out:
422 spin_unlock(&_minor_lock);
423
424 return md ? 0 : -ENXIO;
1da177e4
LT
425}
426
db2a144b 427static void dm_blk_close(struct gendisk *disk, fmode_t mode)
1da177e4 428{
fe5f9f2c 429 struct mapped_device *md = disk->private_data;
6e9624b8 430
4a1aeb98
MB
431 spin_lock(&_minor_lock);
432
2c140a24
MP
433 if (atomic_dec_and_test(&md->open_count) &&
434 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
acfe0ad7 435 queue_work(deferred_remove_workqueue, &deferred_remove_work);
2c140a24 436
1da177e4 437 dm_put(md);
4a1aeb98
MB
438
439 spin_unlock(&_minor_lock);
1da177e4
LT
440}
441
5c6bd75d
AK
442int dm_open_count(struct mapped_device *md)
443{
444 return atomic_read(&md->open_count);
445}
446
447/*
448 * Guarantees nothing is using the device before it's deleted.
449 */
2c140a24 450int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
5c6bd75d
AK
451{
452 int r = 0;
453
454 spin_lock(&_minor_lock);
455
2c140a24 456 if (dm_open_count(md)) {
5c6bd75d 457 r = -EBUSY;
2c140a24
MP
458 if (mark_deferred)
459 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
460 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
461 r = -EEXIST;
5c6bd75d
AK
462 else
463 set_bit(DMF_DELETING, &md->flags);
464
465 spin_unlock(&_minor_lock);
466
467 return r;
468}
469
2c140a24
MP
470int dm_cancel_deferred_remove(struct mapped_device *md)
471{
472 int r = 0;
473
474 spin_lock(&_minor_lock);
475
476 if (test_bit(DMF_DELETING, &md->flags))
477 r = -EBUSY;
478 else
479 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
480
481 spin_unlock(&_minor_lock);
482
483 return r;
484}
485
486static void do_deferred_remove(struct work_struct *w)
487{
488 dm_deferred_remove();
489}
490
fd2ed4d2
MP
491sector_t dm_get_size(struct mapped_device *md)
492{
493 return get_capacity(md->disk);
494}
495
9974fa2c
MS
496struct request_queue *dm_get_md_queue(struct mapped_device *md)
497{
498 return md->queue;
499}
500
fd2ed4d2
MP
501struct dm_stats *dm_get_stats(struct mapped_device *md)
502{
503 return &md->stats;
504}
505
3ac51e74
DW
506static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
507{
508 struct mapped_device *md = bdev->bd_disk->private_data;
509
510 return dm_get_geometry(md, geo);
511}
512
fe5f9f2c 513static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
aa129a22
MB
514 unsigned int cmd, unsigned long arg)
515{
fe5f9f2c 516 struct mapped_device *md = bdev->bd_disk->private_data;
83d5e5b0 517 int srcu_idx;
6c182cd8 518 struct dm_table *map;
aa129a22
MB
519 struct dm_target *tgt;
520 int r = -ENOTTY;
521
6c182cd8 522retry:
83d5e5b0
MP
523 map = dm_get_live_table(md, &srcu_idx);
524
aa129a22
MB
525 if (!map || !dm_table_get_size(map))
526 goto out;
527
528 /* We only support devices that have a single target */
529 if (dm_table_get_num_targets(map) != 1)
530 goto out;
531
532 tgt = dm_table_get_target(map, 0);
4d341d82
MS
533 if (!tgt->type->ioctl)
534 goto out;
aa129a22 535
4f186f8b 536 if (dm_suspended_md(md)) {
aa129a22
MB
537 r = -EAGAIN;
538 goto out;
539 }
540
4d341d82 541 r = tgt->type->ioctl(tgt, cmd, arg);
aa129a22
MB
542
543out:
83d5e5b0 544 dm_put_live_table(md, srcu_idx);
aa129a22 545
6c182cd8
HR
546 if (r == -ENOTCONN) {
547 msleep(10);
548 goto retry;
549 }
550
aa129a22
MB
551 return r;
552}
553
028867ac 554static struct dm_io *alloc_io(struct mapped_device *md)
1da177e4
LT
555{
556 return mempool_alloc(md->io_pool, GFP_NOIO);
557}
558
028867ac 559static void free_io(struct mapped_device *md, struct dm_io *io)
1da177e4
LT
560{
561 mempool_free(io, md->io_pool);
562}
563
028867ac 564static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
1da177e4 565{
dba14160 566 bio_put(&tio->clone);
1da177e4
LT
567}
568
08885643
KU
569static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
570 gfp_t gfp_mask)
cec47e3d 571{
5f015204 572 return mempool_alloc(md->io_pool, gfp_mask);
cec47e3d
KU
573}
574
575static void free_rq_tio(struct dm_rq_target_io *tio)
576{
5f015204 577 mempool_free(tio, tio->md->io_pool);
cec47e3d
KU
578}
579
90abb8c4
KU
580static int md_in_flight(struct mapped_device *md)
581{
582 return atomic_read(&md->pending[READ]) +
583 atomic_read(&md->pending[WRITE]);
584}
585
3eaf840e
JNN
586static void start_io_acct(struct dm_io *io)
587{
588 struct mapped_device *md = io->md;
fd2ed4d2 589 struct bio *bio = io->bio;
c9959059 590 int cpu;
fd2ed4d2 591 int rw = bio_data_dir(bio);
3eaf840e
JNN
592
593 io->start_time = jiffies;
594
074a7aca
TH
595 cpu = part_stat_lock();
596 part_round_stats(cpu, &dm_disk(md)->part0);
597 part_stat_unlock();
1e9bb880
SL
598 atomic_set(&dm_disk(md)->part0.in_flight[rw],
599 atomic_inc_return(&md->pending[rw]));
fd2ed4d2
MP
600
601 if (unlikely(dm_stats_used(&md->stats)))
4f024f37 602 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
fd2ed4d2 603 bio_sectors(bio), false, 0, &io->stats_aux);
3eaf840e
JNN
604}
605
d221d2e7 606static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
607{
608 struct mapped_device *md = io->md;
609 struct bio *bio = io->bio;
610 unsigned long duration = jiffies - io->start_time;
18c0b223 611 int pending;
3eaf840e
JNN
612 int rw = bio_data_dir(bio);
613
18c0b223 614 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
3eaf840e 615
fd2ed4d2 616 if (unlikely(dm_stats_used(&md->stats)))
4f024f37 617 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
fd2ed4d2
MP
618 bio_sectors(bio), true, duration, &io->stats_aux);
619
af7e466a
MP
620 /*
621 * After this is decremented the bio must not be touched if it is
d87f4c14 622 * a flush.
af7e466a 623 */
1e9bb880
SL
624 pending = atomic_dec_return(&md->pending[rw]);
625 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
316d315b 626 pending += atomic_read(&md->pending[rw^0x1]);
3eaf840e 627
d221d2e7
MP
628 /* nudge anyone waiting on suspend queue */
629 if (!pending)
630 wake_up(&md->wait);
3eaf840e
JNN
631}
632
1da177e4
LT
633/*
634 * Add the bio to the list of deferred io.
635 */
92c63902 636static void queue_io(struct mapped_device *md, struct bio *bio)
1da177e4 637{
05447420 638 unsigned long flags;
1da177e4 639
05447420 640 spin_lock_irqsave(&md->deferred_lock, flags);
1da177e4 641 bio_list_add(&md->deferred, bio);
05447420 642 spin_unlock_irqrestore(&md->deferred_lock, flags);
6a8736d1 643 queue_work(md->wq, &md->work);
1da177e4
LT
644}
645
646/*
 647 * Everyone (including functions in this file) should use this
648 * function to access the md->map field, and make sure they call
83d5e5b0 649 * dm_put_live_table() when finished.
1da177e4 650 */
83d5e5b0 651struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
1da177e4 652{
83d5e5b0
MP
653 *srcu_idx = srcu_read_lock(&md->io_barrier);
654
655 return srcu_dereference(md->map, &md->io_barrier);
656}
1da177e4 657
83d5e5b0
MP
658void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
659{
660 srcu_read_unlock(&md->io_barrier, srcu_idx);
661}
662
663void dm_sync_table(struct mapped_device *md)
664{
665 synchronize_srcu(&md->io_barrier);
666 synchronize_rcu_expedited();
667}
668
669/*
670 * A fast alternative to dm_get_live_table/dm_put_live_table.
671 * The caller must not block between these two functions.
672 */
673static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
674{
675 rcu_read_lock();
676 return rcu_dereference(md->map);
677}
1da177e4 678
83d5e5b0
MP
679static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
680{
681 rcu_read_unlock();
1da177e4
LT
682}
683
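/*
 * Illustrative sketch (not part of the original file): the intended usage of
 * dm_get_live_table()/dm_put_live_table().  The caller may sleep between the
 * two calls; dm_get_live_table_fast()/dm_put_live_table_fast() would be used
 * instead when it cannot block.  The helper below is hypothetical.
 */
static sector_t example_live_table_sectors(struct mapped_device *md)
{
	int srcu_idx;
	sector_t size = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map)
		size = dm_table_get_size(map);

	dm_put_live_table(md, srcu_idx);

	return size;
}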
86f1152b
BM
684/*
685 * Open a table device so we can use it as a map destination.
686 */
687static int open_table_device(struct table_device *td, dev_t dev,
688 struct mapped_device *md)
689{
690 static char *_claim_ptr = "I belong to device-mapper";
691 struct block_device *bdev;
692
693 int r;
694
695 BUG_ON(td->dm_dev.bdev);
696
697 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
698 if (IS_ERR(bdev))
699 return PTR_ERR(bdev);
700
701 r = bd_link_disk_holder(bdev, dm_disk(md));
702 if (r) {
703 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
704 return r;
705 }
706
707 td->dm_dev.bdev = bdev;
708 return 0;
709}
710
711/*
712 * Close a table device that we've been using.
713 */
714static void close_table_device(struct table_device *td, struct mapped_device *md)
715{
716 if (!td->dm_dev.bdev)
717 return;
718
719 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
720 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
721 td->dm_dev.bdev = NULL;
722}
723
724static struct table_device *find_table_device(struct list_head *l, dev_t dev,
725 fmode_t mode) {
726 struct table_device *td;
727
728 list_for_each_entry(td, l, list)
729 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
730 return td;
731
732 return NULL;
733}
734
735int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
736 struct dm_dev **result) {
737 int r;
738 struct table_device *td;
739
740 mutex_lock(&md->table_devices_lock);
741 td = find_table_device(&md->table_devices, dev, mode);
742 if (!td) {
743 td = kmalloc(sizeof(*td), GFP_KERNEL);
744 if (!td) {
745 mutex_unlock(&md->table_devices_lock);
746 return -ENOMEM;
747 }
748
749 td->dm_dev.mode = mode;
750 td->dm_dev.bdev = NULL;
751
752 if ((r = open_table_device(td, dev, md))) {
753 mutex_unlock(&md->table_devices_lock);
754 kfree(td);
755 return r;
756 }
757
758 format_dev_t(td->dm_dev.name, dev);
759
760 atomic_set(&td->count, 0);
761 list_add(&td->list, &md->table_devices);
762 }
763 atomic_inc(&td->count);
764 mutex_unlock(&md->table_devices_lock);
765
766 *result = &td->dm_dev;
767 return 0;
768}
769EXPORT_SYMBOL_GPL(dm_get_table_device);
770
771void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
772{
773 struct table_device *td = container_of(d, struct table_device, dm_dev);
774
775 mutex_lock(&md->table_devices_lock);
776 if (atomic_dec_and_test(&td->count)) {
777 close_table_device(td, md);
778 list_del(&td->list);
779 kfree(td);
780 }
781 mutex_unlock(&md->table_devices_lock);
782}
783EXPORT_SYMBOL(dm_put_table_device);
784
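/*
 * Illustrative sketch (not part of the original file): how a caller pairs
 * dm_get_table_device() with dm_put_table_device().  References are counted
 * per (dev, mode) pair, so a second get of the same device reuses the entry.
 * The helper below is hypothetical.
 */
static int example_borrow_device(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r;

	r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d);
	if (r)
		return r;

	/* ... use d->bdev while the reference is held ... */

	dm_put_table_device(md, d);
	return 0;
}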
785static void free_table_devices(struct list_head *devices)
786{
787 struct list_head *tmp, *next;
788
789 list_for_each_safe(tmp, next, devices) {
790 struct table_device *td = list_entry(tmp, struct table_device, list);
791
792 DMWARN("dm_destroy: %s still exists with %d references",
793 td->dm_dev.name, atomic_read(&td->count));
794 kfree(td);
795 }
796}
797
3ac51e74
DW
798/*
799 * Get the geometry associated with a dm device
800 */
801int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
802{
803 *geo = md->geometry;
804
805 return 0;
806}
807
808/*
809 * Set the geometry of a device.
810 */
811int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
812{
813 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
814
815 if (geo->start > sz) {
816 DMWARN("Start sector is beyond the geometry limits.");
817 return -EINVAL;
818 }
819
820 md->geometry = *geo;
821
822 return 0;
823}
824
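/*
 * Illustrative sketch (not part of the original file): filling in a
 * hd_geometry and handing it to dm_set_geometry(), which rejects a start
 * sector beyond cylinders * heads * sectors.  The numbers and the helper
 * below are made up for the example.
 */
static int example_set_fake_geometry(struct mapped_device *md)
{
	struct hd_geometry geo = {
		.heads		= 255,
		.sectors	= 63,
		.cylinders	= 1024,
		.start		= 0,
	};

	return dm_set_geometry(md, &geo);	/* 0 on success, -EINVAL if start is out of range */
}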
1da177e4
LT
825/*-----------------------------------------------------------------
826 * CRUD START:
 827 * A more elegant solution is in the works that uses the queue
 828 * merge fn; unfortunately there are a couple of changes to
829 * the block layer that I want to make for this. So in the
830 * interests of getting something for people to use I give
831 * you this clearly demarcated crap.
832 *---------------------------------------------------------------*/
833
2e93ccc1
KU
834static int __noflush_suspending(struct mapped_device *md)
835{
836 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
837}
838
1da177e4
LT
839/*
840 * Decrements the number of outstanding ios that a bio has been
 841 * cloned into, completing the original io if necessary.
842 */
858119e1 843static void dec_pending(struct dm_io *io, int error)
1da177e4 844{
2e93ccc1 845 unsigned long flags;
b35f8caa
MB
846 int io_error;
847 struct bio *bio;
848 struct mapped_device *md = io->md;
2e93ccc1
KU
849
850 /* Push-back supersedes any I/O errors */
f88fb981
KU
851 if (unlikely(error)) {
852 spin_lock_irqsave(&io->endio_lock, flags);
853 if (!(io->error > 0 && __noflush_suspending(md)))
854 io->error = error;
855 spin_unlock_irqrestore(&io->endio_lock, flags);
856 }
1da177e4
LT
857
858 if (atomic_dec_and_test(&io->io_count)) {
2e93ccc1
KU
859 if (io->error == DM_ENDIO_REQUEUE) {
860 /*
861 * Target requested pushing back the I/O.
2e93ccc1 862 */
022c2611 863 spin_lock_irqsave(&md->deferred_lock, flags);
6a8736d1
TH
864 if (__noflush_suspending(md))
865 bio_list_add_head(&md->deferred, io->bio);
866 else
2e93ccc1
KU
867 /* noflush suspend was interrupted. */
868 io->error = -EIO;
022c2611 869 spin_unlock_irqrestore(&md->deferred_lock, flags);
2e93ccc1
KU
870 }
871
b35f8caa
MB
872 io_error = io->error;
873 bio = io->bio;
6a8736d1
TH
874 end_io_acct(io);
875 free_io(md, io);
876
877 if (io_error == DM_ENDIO_REQUEUE)
878 return;
2e93ccc1 879
4f024f37 880 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
af7e466a 881 /*
6a8736d1
TH
882 * Preflush done for flush with data, reissue
883 * without REQ_FLUSH.
af7e466a 884 */
6a8736d1
TH
885 bio->bi_rw &= ~REQ_FLUSH;
886 queue_io(md, bio);
af7e466a 887 } else {
b372d360 888 /* done with normal IO or empty flush */
0a82a8d1 889 trace_block_bio_complete(md->queue, bio, io_error);
b372d360 890 bio_endio(bio, io_error);
b35f8caa 891 }
1da177e4
LT
892 }
893}
894
7eee4ae2
MS
895static void disable_write_same(struct mapped_device *md)
896{
897 struct queue_limits *limits = dm_get_queue_limits(md);
898
899 /* device doesn't really support WRITE SAME, disable it */
900 limits->max_write_same_sectors = 0;
901}
902
6712ecf8 903static void clone_endio(struct bio *bio, int error)
1da177e4 904{
5164bece 905 int r = error;
bfc6d41c 906 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
b35f8caa 907 struct dm_io *io = tio->io;
9faf400f 908 struct mapped_device *md = tio->io->md;
1da177e4
LT
909 dm_endio_fn endio = tio->ti->type->end_io;
910
1da177e4
LT
911 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
912 error = -EIO;
913
914 if (endio) {
7de3ee57 915 r = endio(tio->ti, bio, error);
2e93ccc1
KU
916 if (r < 0 || r == DM_ENDIO_REQUEUE)
917 /*
918 * error and requeue request are handled
919 * in dec_pending().
920 */
1da177e4 921 error = r;
45cbcd79
KU
922 else if (r == DM_ENDIO_INCOMPLETE)
923 /* The target will handle the io */
6712ecf8 924 return;
45cbcd79
KU
925 else if (r) {
926 DMWARN("unimplemented target endio return value: %d", r);
927 BUG();
928 }
1da177e4
LT
929 }
930
7eee4ae2
MS
931 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
932 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
933 disable_write_same(md);
934
9faf400f 935 free_tio(md, tio);
b35f8caa 936 dec_pending(io, error);
1da177e4
LT
937}
938
cec47e3d
KU
939/*
940 * Partial completion handling for request-based dm
941 */
942static void end_clone_bio(struct bio *clone, int error)
943{
bfc6d41c
MP
944 struct dm_rq_clone_bio_info *info =
945 container_of(clone, struct dm_rq_clone_bio_info, clone);
cec47e3d
KU
946 struct dm_rq_target_io *tio = info->tio;
947 struct bio *bio = info->orig;
4f024f37 948 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
cec47e3d
KU
949
950 bio_put(clone);
951
952 if (tio->error)
953 /*
954 * An error has already been detected on the request.
955 * Once error occurred, just let clone->end_io() handle
956 * the remainder.
957 */
958 return;
959 else if (error) {
960 /*
 961 * Don't report the error to the upper layer yet.
 962 * The error handling decision is made by the target driver
 963 * when the request is completed.
964 */
965 tio->error = error;
966 return;
967 }
968
969 /*
970 * I/O for the bio successfully completed.
 971 * Report the data completion to the upper layer.
972 */
973
974 /*
975 * bios are processed from the head of the list.
976 * So the completing bio should always be rq->bio.
 977 * If it's not, something is wrong.
978 */
979 if (tio->orig->bio != bio)
980 DMERR("bio completion is going in the middle of the request");
981
982 /*
983 * Update the original request.
984 * Do not use blk_end_request() here, because it may complete
985 * the original request before the clone, and break the ordering.
986 */
987 blk_update_request(tio->orig, 0, nr_bytes);
988}
989
990/*
991 * Don't touch any member of the md after calling this function because
992 * the md may be freed in dm_put() at the end of this function.
993 * Or do dm_get() before calling this function and dm_put() later.
994 */
b4324fee 995static void rq_completed(struct mapped_device *md, int rw, int run_queue)
cec47e3d 996{
b4324fee 997 atomic_dec(&md->pending[rw]);
cec47e3d
KU
998
999 /* nudge anyone waiting on suspend queue */
b4324fee 1000 if (!md_in_flight(md))
cec47e3d
KU
1001 wake_up(&md->wait);
1002
a8c32a5c
JA
1003 /*
1004 * Run this off this callpath, as drivers could invoke end_io while
1005 * inside their request_fn (and holding the queue lock). Calling
1006 * back into ->request_fn() could deadlock attempting to grab the
1007 * queue lock again.
1008 */
cec47e3d 1009 if (run_queue)
a8c32a5c 1010 blk_run_queue_async(md->queue);
cec47e3d
KU
1011
1012 /*
1013 * dm_put() must be at the end of this function. See the comment above
1014 */
1015 dm_put(md);
1016}
1017
a77e28c7
KU
1018static void free_rq_clone(struct request *clone)
1019{
1020 struct dm_rq_target_io *tio = clone->end_io_data;
1021
1022 blk_rq_unprep_clone(clone);
1023 free_rq_tio(tio);
1024}
1025
980691e5
KU
1026/*
1027 * Complete the clone and the original request.
1028 * Must be called without queue lock.
1029 */
1030static void dm_end_request(struct request *clone, int error)
1031{
1032 int rw = rq_data_dir(clone);
1033 struct dm_rq_target_io *tio = clone->end_io_data;
1034 struct mapped_device *md = tio->md;
1035 struct request *rq = tio->orig;
1036
29e4013d 1037 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
980691e5
KU
1038 rq->errors = clone->errors;
1039 rq->resid_len = clone->resid_len;
1040
1041 if (rq->sense)
1042 /*
1043 * We are using the sense buffer of the original
1044 * request.
1045 * So setting the length of the sense data is enough.
1046 */
1047 rq->sense_len = clone->sense_len;
1048 }
1049
1050 free_rq_clone(clone);
29e4013d
TH
1051 blk_end_request_all(rq, error);
1052 rq_completed(md, rw, true);
980691e5
KU
1053}
1054
cec47e3d
KU
1055static void dm_unprep_request(struct request *rq)
1056{
1057 struct request *clone = rq->special;
cec47e3d
KU
1058
1059 rq->special = NULL;
1060 rq->cmd_flags &= ~REQ_DONTPREP;
1061
a77e28c7 1062 free_rq_clone(clone);
cec47e3d
KU
1063}
1064
1065/*
1066 * Requeue the original request of a clone.
1067 */
1068void dm_requeue_unmapped_request(struct request *clone)
1069{
b4324fee 1070 int rw = rq_data_dir(clone);
cec47e3d
KU
1071 struct dm_rq_target_io *tio = clone->end_io_data;
1072 struct mapped_device *md = tio->md;
1073 struct request *rq = tio->orig;
1074 struct request_queue *q = rq->q;
1075 unsigned long flags;
1076
1077 dm_unprep_request(rq);
1078
1079 spin_lock_irqsave(q->queue_lock, flags);
cec47e3d
KU
1080 blk_requeue_request(q, rq);
1081 spin_unlock_irqrestore(q->queue_lock, flags);
1082
b4324fee 1083 rq_completed(md, rw, 0);
cec47e3d
KU
1084}
1085EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
1086
1087static void __stop_queue(struct request_queue *q)
1088{
1089 blk_stop_queue(q);
1090}
1091
1092static void stop_queue(struct request_queue *q)
1093{
1094 unsigned long flags;
1095
1096 spin_lock_irqsave(q->queue_lock, flags);
1097 __stop_queue(q);
1098 spin_unlock_irqrestore(q->queue_lock, flags);
1099}
1100
1101static void __start_queue(struct request_queue *q)
1102{
1103 if (blk_queue_stopped(q))
1104 blk_start_queue(q);
1105}
1106
1107static void start_queue(struct request_queue *q)
1108{
1109 unsigned long flags;
1110
1111 spin_lock_irqsave(q->queue_lock, flags);
1112 __start_queue(q);
1113 spin_unlock_irqrestore(q->queue_lock, flags);
1114}
1115
11a68244 1116static void dm_done(struct request *clone, int error, bool mapped)
cec47e3d 1117{
11a68244 1118 int r = error;
cec47e3d 1119 struct dm_rq_target_io *tio = clone->end_io_data;
ba1cbad9 1120 dm_request_endio_fn rq_end_io = NULL;
cec47e3d 1121
ba1cbad9
MS
1122 if (tio->ti) {
1123 rq_end_io = tio->ti->type->rq_end_io;
1124
1125 if (mapped && rq_end_io)
1126 r = rq_end_io(tio->ti, clone, error, &tio->info);
1127 }
cec47e3d 1128
7eee4ae2
MS
1129 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
1130 !clone->q->limits.max_write_same_sectors))
1131 disable_write_same(tio->md);
1132
11a68244 1133 if (r <= 0)
cec47e3d 1134 /* The target wants to complete the I/O */
11a68244
KU
1135 dm_end_request(clone, r);
1136 else if (r == DM_ENDIO_INCOMPLETE)
cec47e3d
KU
1137 /* The target will handle the I/O */
1138 return;
11a68244 1139 else if (r == DM_ENDIO_REQUEUE)
cec47e3d
KU
1140 /* The target wants to requeue the I/O */
1141 dm_requeue_unmapped_request(clone);
1142 else {
11a68244 1143 DMWARN("unimplemented target endio return value: %d", r);
cec47e3d
KU
1144 BUG();
1145 }
1146}
1147
11a68244
KU
1148/*
1149 * Request completion handler for request-based dm
1150 */
1151static void dm_softirq_done(struct request *rq)
1152{
1153 bool mapped = true;
1154 struct request *clone = rq->completion_data;
1155 struct dm_rq_target_io *tio = clone->end_io_data;
1156
1157 if (rq->cmd_flags & REQ_FAILED)
1158 mapped = false;
1159
1160 dm_done(clone, tio->error, mapped);
1161}
1162
cec47e3d
KU
1163/*
1164 * Complete the clone and the original request with the error status
1165 * through softirq context.
1166 */
1167static void dm_complete_request(struct request *clone, int error)
1168{
1169 struct dm_rq_target_io *tio = clone->end_io_data;
1170 struct request *rq = tio->orig;
1171
1172 tio->error = error;
1173 rq->completion_data = clone;
1174 blk_complete_request(rq);
1175}
1176
1177/*
1178 * Complete the not-mapped clone and the original request with the error status
1179 * through softirq context.
1180 * Target's rq_end_io() function isn't called.
1181 * This may be used when the target's map_rq() function fails.
1182 */
1183void dm_kill_unmapped_request(struct request *clone, int error)
1184{
1185 struct dm_rq_target_io *tio = clone->end_io_data;
1186 struct request *rq = tio->orig;
1187
1188 rq->cmd_flags |= REQ_FAILED;
1189 dm_complete_request(clone, error);
1190}
1191EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
1192
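/*
 * Illustrative sketch (not part of the original file): what a hypothetical
 * request-based target's map_rq hook might return, and which of the helpers
 * above dm core then invokes.  DM_MAPIO_REMAPPED makes dm dispatch the clone,
 * DM_MAPIO_REQUEUE leads to dm_requeue_unmapped_request(), and a negative
 * error ends up in dm_kill_unmapped_request().  The per-target context
 * structure below is invented for the example.
 */
struct example_target_ctx {
	struct block_device *path_bdev;
};

static int example_target_map_rq(struct dm_target *ti, struct request *clone,
				 union map_info *map_context)
{
	struct example_target_ctx *ctx = ti->private;

	if (!ctx->path_bdev)
		return DM_MAPIO_REQUEUE;	/* no usable path: requeue the original */

	clone->q = bdev_get_queue(ctx->path_bdev);
	clone->rq_disk = ctx->path_bdev->bd_disk;

	return DM_MAPIO_REMAPPED;		/* dm core dispatches the clone */
}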
1193/*
1194 * Called with the queue lock held
1195 */
1196static void end_clone_request(struct request *clone, int error)
1197{
1198 /*
 1199 * This is just for cleaning up the information of the queue in
 1200 * which the clone was dispatched.
 1201 * The clone is *NOT* actually freed here because it was allocated from
 1202 * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1203 */
1204 __blk_put_request(clone->q, clone);
1205
1206 /*
1207 * Actual request completion is done in a softirq context which doesn't
1208 * hold the queue lock. Otherwise, deadlock could occur because:
1209 * - another request may be submitted by the upper level driver
1210 * of the stacking during the completion
1211 * - the submission which requires queue lock may be done
1212 * against this queue
1213 */
1214 dm_complete_request(clone, error);
1215}
1216
56a67df7
MS
1217/*
1218 * Return maximum size of I/O possible at the supplied sector up to the current
1219 * target boundary.
1220 */
1221static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1222{
1223 sector_t target_offset = dm_target_offset(ti, sector);
1224
1225 return ti->len - target_offset;
1226}
1227
1228static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1da177e4 1229{
56a67df7 1230 sector_t len = max_io_len_target_boundary(sector, ti);
542f9038 1231 sector_t offset, max_len;
1da177e4
LT
1232
1233 /*
542f9038 1234 * Does the target need to split even further?
1da177e4 1235 */
542f9038
MS
1236 if (ti->max_io_len) {
1237 offset = dm_target_offset(ti, sector);
1238 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1239 max_len = sector_div(offset, ti->max_io_len);
1240 else
1241 max_len = offset & (ti->max_io_len - 1);
1242 max_len = ti->max_io_len - max_len;
1243
1244 if (len > max_len)
1245 len = max_len;
1da177e4
LT
1246 }
1247
1248 return len;
1249}
1250
542f9038
MS
1251int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1252{
1253 if (len > UINT_MAX) {
1254 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1255 (unsigned long long)len, UINT_MAX);
1256 ti->error = "Maximum size of target IO is too large";
1257 return -EINVAL;
1258 }
1259
1260 ti->max_io_len = (uint32_t) len;
1261
1262 return 0;
1263}
1264EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1265
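/*
 * Illustrative sketch (not part of the original file): a hypothetical target
 * constructor capping its I/O size with dm_set_target_max_io_len().  The
 * chunk size of 8 sectors (4KiB) is invented for the example.
 */
static int example_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;

	r = dm_set_target_max_io_len(ti, 8);	/* split I/O at 8-sector boundaries */
	if (r)
		return r;			/* ti->error was set by the helper */

	ti->num_flush_bios = 1;
	return 0;
}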
1dd40c3e
MP
1266/*
1267 * A target may call dm_accept_partial_bio only from the map routine. It is
1268 * allowed for all bio types except REQ_FLUSH.
1269 *
 1270 * dm_accept_partial_bio informs the dm core that the target only wants to
 1271 * process an additional n_sectors sectors of the bio and that the rest of
 1272 * the data should be sent in a subsequent bio.
1273 *
 1274 * A diagram that explains the arithmetic:
1275 * +--------------------+---------------+-------+
1276 * | 1 | 2 | 3 |
1277 * +--------------------+---------------+-------+
1278 *
1279 * <-------------- *tio->len_ptr --------------->
1280 * <------- bi_size ------->
1281 * <-- n_sectors -->
1282 *
1283 * Region 1 was already iterated over with bio_advance or similar function.
1284 * (it may be empty if the target doesn't use bio_advance)
1285 * Region 2 is the remaining bio size that the target wants to process.
1286 * (it may be empty if region 1 is non-empty, although there is no reason
1287 * to make it empty)
1288 * The target requires that region 3 is to be sent in the next bio.
1289 *
1290 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1291 * the partially processed part (the sum of regions 1+2) must be the same for all
1292 * copies of the bio.
1293 */
1294void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1295{
1296 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1297 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1298 BUG_ON(bio->bi_rw & REQ_FLUSH);
1299 BUG_ON(bi_size > *tio->len_ptr);
1300 BUG_ON(n_sectors > bi_size);
1301 *tio->len_ptr -= bi_size - n_sectors;
1302 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1303}
1304EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1305
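/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * bio-based target's map function that only handles I/O up to a 128-sector
 * boundary and asks dm core to resubmit the remainder, as described above.
 * The boundary value and the function name are invented.
 */
static int example_target_map(struct dm_target *ti, struct bio *bio)
{
	sector_t sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = 128 - (sector & 127);	/* sectors left in this 128-sector chunk */

	if (!(bio->bi_rw & REQ_FLUSH) && bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);	/* region 3 comes back as a new bio */

	/* ... remap bio->bi_bdev / bi_sector to the backing device here ... */

	return DM_MAPIO_REMAPPED;
}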
bd2a49b8 1306static void __map_bio(struct dm_target_io *tio)
1da177e4
LT
1307{
1308 int r;
2056a782 1309 sector_t sector;
9faf400f 1310 struct mapped_device *md;
dba14160 1311 struct bio *clone = &tio->clone;
bd2a49b8 1312 struct dm_target *ti = tio->ti;
1da177e4 1313
1da177e4 1314 clone->bi_end_io = clone_endio;
1da177e4
LT
1315
1316 /*
1317 * Map the clone. If r == 0 we don't need to do
1318 * anything, the target has assumed ownership of
1319 * this io.
1320 */
1321 atomic_inc(&tio->io->io_count);
4f024f37 1322 sector = clone->bi_iter.bi_sector;
7de3ee57 1323 r = ti->type->map(ti, clone);
45cbcd79 1324 if (r == DM_MAPIO_REMAPPED) {
1da177e4 1325 /* the bio has been remapped so dispatch it */
2056a782 1326
d07335e5
MS
1327 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1328 tio->io->bio->bi_bdev->bd_dev, sector);
2056a782 1329
1da177e4 1330 generic_make_request(clone);
2e93ccc1
KU
1331 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1332 /* error the io and bail out, or requeue it if needed */
9faf400f
SB
1333 md = tio->io->md;
1334 dec_pending(tio->io, r);
9faf400f 1335 free_tio(md, tio);
45cbcd79
KU
1336 } else if (r) {
1337 DMWARN("unimplemented target map return value: %d", r);
1338 BUG();
1da177e4
LT
1339 }
1340}
1341
1342struct clone_info {
1343 struct mapped_device *md;
1344 struct dm_table *map;
1345 struct bio *bio;
1346 struct dm_io *io;
1347 sector_t sector;
e0d6609a 1348 unsigned sector_count;
1da177e4
LT
1349};
1350
e0d6609a 1351static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bd2a49b8 1352{
4f024f37
KO
1353 bio->bi_iter.bi_sector = sector;
1354 bio->bi_iter.bi_size = to_bytes(len);
1da177e4
LT
1355}
1356
1357/*
1358 * Creates a bio that consists of range of complete bvecs.
1359 */
dba14160 1360static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1c3b13e6 1361 sector_t sector, unsigned len)
1da177e4 1362{
dba14160 1363 struct bio *clone = &tio->clone;
1da177e4 1364
1c3b13e6
KO
1365 __bio_clone_fast(clone, bio);
1366
1367 if (bio_integrity(bio))
1368 bio_integrity_clone(clone, bio, GFP_NOIO);
bd2a49b8 1369
1c3b13e6
KO
1370 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1371 clone->bi_iter.bi_size = to_bytes(len);
1372
1373 if (bio_integrity(bio))
1374 bio_integrity_trim(clone, 0, len);
1da177e4
LT
1375}
1376
9015df24 1377static struct dm_target_io *alloc_tio(struct clone_info *ci,
99778273 1378 struct dm_target *ti,
55a62eef 1379 unsigned target_bio_nr)
f9ab94ce 1380{
dba14160
MP
1381 struct dm_target_io *tio;
1382 struct bio *clone;
1383
99778273 1384 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
dba14160 1385 tio = container_of(clone, struct dm_target_io, clone);
f9ab94ce
MP
1386
1387 tio->io = ci->io;
1388 tio->ti = ti;
55a62eef 1389 tio->target_bio_nr = target_bio_nr;
9015df24
AK
1390
1391 return tio;
1392}
1393
14fe594d
AK
1394static void __clone_and_map_simple_bio(struct clone_info *ci,
1395 struct dm_target *ti,
1dd40c3e 1396 unsigned target_bio_nr, unsigned *len)
9015df24 1397{
99778273 1398 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
dba14160 1399 struct bio *clone = &tio->clone;
9015df24 1400
1dd40c3e
MP
1401 tio->len_ptr = len;
1402
99778273 1403 __bio_clone_fast(clone, ci->bio);
bd2a49b8 1404 if (len)
1dd40c3e 1405 bio_setup_sector(clone, ci->sector, *len);
f9ab94ce 1406
bd2a49b8 1407 __map_bio(tio);
f9ab94ce
MP
1408}
1409
14fe594d 1410static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1411 unsigned num_bios, unsigned *len)
06a426ce 1412{
55a62eef 1413 unsigned target_bio_nr;
06a426ce 1414
55a62eef 1415 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
14fe594d 1416 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
06a426ce
MS
1417}
1418
14fe594d 1419static int __send_empty_flush(struct clone_info *ci)
f9ab94ce 1420{
06a426ce 1421 unsigned target_nr = 0;
f9ab94ce
MP
1422 struct dm_target *ti;
1423
b372d360 1424 BUG_ON(bio_has_data(ci->bio));
f9ab94ce 1425 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1dd40c3e 1426 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
f9ab94ce 1427
f9ab94ce
MP
1428 return 0;
1429}
1430
e4c93811 1431static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1432 sector_t sector, unsigned *len)
5ae89a87 1433{
dba14160 1434 struct bio *bio = ci->bio;
5ae89a87 1435 struct dm_target_io *tio;
b0d8ed4d
AK
1436 unsigned target_bio_nr;
1437 unsigned num_target_bios = 1;
5ae89a87 1438
b0d8ed4d
AK
1439 /*
1440 * Does the target want to receive duplicate copies of the bio?
1441 */
1442 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1443 num_target_bios = ti->num_write_bios(ti, bio);
e4c93811 1444
b0d8ed4d 1445 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
99778273 1446 tio = alloc_tio(ci, ti, target_bio_nr);
1dd40c3e
MP
1447 tio->len_ptr = len;
1448 clone_bio(tio, bio, sector, *len);
b0d8ed4d
AK
1449 __map_bio(tio);
1450 }
5ae89a87
MS
1451}
1452
55a62eef 1453typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
23508a96 1454
55a62eef 1455static unsigned get_num_discard_bios(struct dm_target *ti)
23508a96 1456{
55a62eef 1457 return ti->num_discard_bios;
23508a96
MS
1458}
1459
55a62eef 1460static unsigned get_num_write_same_bios(struct dm_target *ti)
23508a96 1461{
55a62eef 1462 return ti->num_write_same_bios;
23508a96
MS
1463}
1464
1465typedef bool (*is_split_required_fn)(struct dm_target *ti);
1466
1467static bool is_split_required_for_discard(struct dm_target *ti)
1468{
55a62eef 1469 return ti->split_discard_bios;
23508a96
MS
1470}
1471
14fe594d
AK
1472static int __send_changing_extent_only(struct clone_info *ci,
1473 get_num_bios_fn get_num_bios,
1474 is_split_required_fn is_split_required)
5ae89a87
MS
1475{
1476 struct dm_target *ti;
e0d6609a 1477 unsigned len;
55a62eef 1478 unsigned num_bios;
5ae89a87 1479
a79245b3
MS
1480 do {
1481 ti = dm_table_find_target(ci->map, ci->sector);
1482 if (!dm_target_is_valid(ti))
1483 return -EIO;
5ae89a87 1484
5ae89a87 1485 /*
23508a96
MS
1486 * Even though the device advertised support for this type of
1487 * request, that does not mean every target supports it, and
936688d7 1488 * reconfiguration might also have changed that since the
a79245b3 1489 * check was performed.
5ae89a87 1490 */
55a62eef
AK
1491 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1492 if (!num_bios)
a79245b3 1493 return -EOPNOTSUPP;
5ae89a87 1494
23508a96 1495 if (is_split_required && !is_split_required(ti))
e0d6609a 1496 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
7acf0277 1497 else
e0d6609a 1498 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
06a426ce 1499
1dd40c3e 1500 __send_duplicate_bios(ci, ti, num_bios, &len);
a79245b3
MS
1501
1502 ci->sector += len;
1503 } while (ci->sector_count -= len);
5ae89a87
MS
1504
1505 return 0;
1506}
1507
14fe594d 1508static int __send_discard(struct clone_info *ci)
23508a96 1509{
14fe594d
AK
1510 return __send_changing_extent_only(ci, get_num_discard_bios,
1511 is_split_required_for_discard);
23508a96
MS
1512}
1513
14fe594d 1514static int __send_write_same(struct clone_info *ci)
23508a96 1515{
14fe594d 1516 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
23508a96
MS
1517}
1518
e4c93811
AK
1519/*
1520 * Select the correct strategy for processing a non-flush bio.
1521 */
14fe594d 1522static int __split_and_process_non_flush(struct clone_info *ci)
1da177e4 1523{
dba14160 1524 struct bio *bio = ci->bio;
512875bd 1525 struct dm_target *ti;
1c3b13e6 1526 unsigned len;
1da177e4 1527
5ae89a87 1528 if (unlikely(bio->bi_rw & REQ_DISCARD))
14fe594d 1529 return __send_discard(ci);
23508a96 1530 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
14fe594d 1531 return __send_write_same(ci);
5ae89a87 1532
512875bd
JN
1533 ti = dm_table_find_target(ci->map, ci->sector);
1534 if (!dm_target_is_valid(ti))
1535 return -EIO;
1536
1c3b13e6 1537 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1da177e4 1538
1dd40c3e 1539 __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1da177e4 1540
1c3b13e6
KO
1541 ci->sector += len;
1542 ci->sector_count -= len;
1da177e4 1543
1c3b13e6 1544 return 0;
1da177e4
LT
1545}
1546
1547/*
14fe594d 1548 * Entry point to split a bio into clones and submit them to the targets.
1da177e4 1549 */
83d5e5b0
MP
1550static void __split_and_process_bio(struct mapped_device *md,
1551 struct dm_table *map, struct bio *bio)
1da177e4
LT
1552{
1553 struct clone_info ci;
512875bd 1554 int error = 0;
1da177e4 1555
83d5e5b0 1556 if (unlikely(!map)) {
6a8736d1 1557 bio_io_error(bio);
f0b9a450
MP
1558 return;
1559 }
692d0eb9 1560
83d5e5b0 1561 ci.map = map;
1da177e4 1562 ci.md = md;
1da177e4
LT
1563 ci.io = alloc_io(md);
1564 ci.io->error = 0;
1565 atomic_set(&ci.io->io_count, 1);
1566 ci.io->bio = bio;
1567 ci.io->md = md;
f88fb981 1568 spin_lock_init(&ci.io->endio_lock);
4f024f37 1569 ci.sector = bio->bi_iter.bi_sector;
1da177e4 1570
3eaf840e 1571 start_io_acct(ci.io);
bd2a49b8 1572
b372d360
MS
1573 if (bio->bi_rw & REQ_FLUSH) {
1574 ci.bio = &ci.md->flush_bio;
1575 ci.sector_count = 0;
14fe594d 1576 error = __send_empty_flush(&ci);
b372d360
MS
1577 /* dec_pending submits any data associated with flush */
1578 } else {
6a8736d1 1579 ci.bio = bio;
d87f4c14 1580 ci.sector_count = bio_sectors(bio);
b372d360 1581 while (ci.sector_count && !error)
14fe594d 1582 error = __split_and_process_non_flush(&ci);
d87f4c14 1583 }
1da177e4
LT
1584
1585 /* drop the extra reference count */
512875bd 1586 dec_pending(ci.io, error);
1da177e4
LT
1587}
1588/*-----------------------------------------------------------------
1589 * CRUD END
1590 *---------------------------------------------------------------*/
1591
f6fccb12
MB
1592static int dm_merge_bvec(struct request_queue *q,
1593 struct bvec_merge_data *bvm,
1594 struct bio_vec *biovec)
1595{
1596 struct mapped_device *md = q->queuedata;
83d5e5b0 1597 struct dm_table *map = dm_get_live_table_fast(md);
f6fccb12
MB
1598 struct dm_target *ti;
1599 sector_t max_sectors;
5037108a 1600 int max_size = 0;
f6fccb12
MB
1601
1602 if (unlikely(!map))
5037108a 1603 goto out;
f6fccb12
MB
1604
1605 ti = dm_table_find_target(map, bvm->bi_sector);
b01cd5ac 1606 if (!dm_target_is_valid(ti))
83d5e5b0 1607 goto out;
f6fccb12
MB
1608
1609 /*
1610 * Find maximum amount of I/O that won't need splitting
1611 */
56a67df7 1612 max_sectors = min(max_io_len(bvm->bi_sector, ti),
148e51ba 1613 (sector_t) queue_max_sectors(q));
f6fccb12 1614 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
148e51ba 1615 if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
f6fccb12
MB
1616 max_size = 0;
1617
1618 /*
1619 * merge_bvec_fn() returns number of bytes
1620 * it can accept at this offset
1621 * max is precomputed maximal io size
1622 */
1623 if (max_size && ti->type->merge)
1624 max_size = ti->type->merge(ti, bvm, biovec, max_size);
8cbeb67a
MP
1625 /*
 1626 * If the target doesn't support the merge method and some of the devices
148e51ba
MS
1627 * provided their merge_bvec method (we know this by looking for the
1628 * max_hw_sectors that dm_set_device_limits may set), then we can't
1629 * allow bios with multiple vector entries. So always set max_size
1630 * to 0, and the code below allows just one page.
8cbeb67a
MP
1631 */
1632 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
8cbeb67a 1633 max_size = 0;
f6fccb12 1634
5037108a 1635out:
83d5e5b0 1636 dm_put_live_table_fast(md);
f6fccb12
MB
1637 /*
1638 * Always allow an entire first page
1639 */
1640 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1641 max_size = biovec->bv_len;
1642
f6fccb12
MB
1643 return max_size;
1644}
1645
1da177e4
LT
1646/*
1647 * The request function that just remaps the bio built up by
1648 * dm_merge_bvec.
1649 */
5a7bbad2 1650static void _dm_request(struct request_queue *q, struct bio *bio)
1da177e4 1651{
12f03a49 1652 int rw = bio_data_dir(bio);
1da177e4 1653 struct mapped_device *md = q->queuedata;
83d5e5b0
MP
1654 int srcu_idx;
1655 struct dm_table *map;
1da177e4 1656
83d5e5b0 1657 map = dm_get_live_table(md, &srcu_idx);
1da177e4 1658
18c0b223 1659 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
12f03a49 1660
6a8736d1
TH
1661 /* if we're suspended, we have to queue this io for later */
1662 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
83d5e5b0 1663 dm_put_live_table(md, srcu_idx);
1da177e4 1664
6a8736d1
TH
1665 if (bio_rw(bio) != READA)
1666 queue_io(md, bio);
1667 else
54d9a1b4 1668 bio_io_error(bio);
5a7bbad2 1669 return;
1da177e4
LT
1670 }
1671
83d5e5b0
MP
1672 __split_and_process_bio(md, map, bio);
1673 dm_put_live_table(md, srcu_idx);
5a7bbad2 1674 return;
cec47e3d
KU
1675}
1676
fd2ed4d2 1677int dm_request_based(struct mapped_device *md)
cec47e3d
KU
1678{
1679 return blk_queue_stackable(md->queue);
1680}
1681
5a7bbad2 1682static void dm_request(struct request_queue *q, struct bio *bio)
cec47e3d
KU
1683{
1684 struct mapped_device *md = q->queuedata;
1685
1686 if (dm_request_based(md))
5a7bbad2
CH
1687 blk_queue_bio(q, bio);
1688 else
1689 _dm_request(q, bio);
cec47e3d
KU
1690}
1691
1692void dm_dispatch_request(struct request *rq)
1693{
1694 int r;
1695
1696 if (blk_queue_io_stat(rq->q))
1697 rq->cmd_flags |= REQ_IO_STAT;
1698
1699 rq->start_time = jiffies;
1700 r = blk_insert_cloned_request(rq->q, rq);
1701 if (r)
1702 dm_complete_request(rq, r);
1703}
1704EXPORT_SYMBOL_GPL(dm_dispatch_request);
1705
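/*
 * Illustrative sketch (not part of the original file): a hypothetical target
 * that answered DM_MAPIO_SUBMITTED from its map_rq hook can hold on to the
 * clone and dispatch it later itself with dm_dispatch_request(), e.g. from a
 * worker once a path becomes available.  The structure below is invented.
 */
struct example_deferred_clone {
	struct work_struct work;
	struct request *clone;
};

static void example_dispatch_deferred(struct work_struct *work)
{
	struct example_deferred_clone *d =
		container_of(work, struct example_deferred_clone, work);

	dm_dispatch_request(d->clone);	/* inserts the clone into its destination queue */
	kfree(d);
}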
cec47e3d
KU
1706static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1707 void *data)
1708{
1709 struct dm_rq_target_io *tio = data;
94818742
KO
1710 struct dm_rq_clone_bio_info *info =
1711 container_of(bio, struct dm_rq_clone_bio_info, clone);
cec47e3d
KU
1712
1713 info->orig = bio_orig;
1714 info->tio = tio;
1715 bio->bi_end_io = end_clone_bio;
cec47e3d
KU
1716
1717 return 0;
1718}
1719
1720static int setup_clone(struct request *clone, struct request *rq,
1721 struct dm_rq_target_io *tio)
1722{
d0bcb878 1723 int r;
cec47e3d 1724
29e4013d
TH
1725 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1726 dm_rq_bio_constructor, tio);
1727 if (r)
1728 return r;
cec47e3d 1729
29e4013d
TH
1730 clone->cmd = rq->cmd;
1731 clone->cmd_len = rq->cmd_len;
1732 clone->sense = rq->sense;
cec47e3d
KU
1733 clone->end_io = end_clone_request;
1734 clone->end_io_data = tio;
1735
1736 return 0;
1737}
1738
6facdaff
KU
1739static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1740 gfp_t gfp_mask)
1741{
1742 struct request *clone;
1743 struct dm_rq_target_io *tio;
1744
1745 tio = alloc_rq_tio(md, gfp_mask);
1746 if (!tio)
1747 return NULL;
1748
1749 tio->md = md;
1750 tio->ti = NULL;
1751 tio->orig = rq;
1752 tio->error = 0;
1753 memset(&tio->info, 0, sizeof(tio->info));
1754
1755 clone = &tio->clone;
1756 if (setup_clone(clone, rq, tio)) {
1757 /* -ENOMEM */
1758 free_rq_tio(tio);
1759 return NULL;
1760 }
1761
1762 return clone;
1763}
1764
cec47e3d
KU
1765/*
1766 * Called with the queue lock held.
1767 */
1768static int dm_prep_fn(struct request_queue *q, struct request *rq)
1769{
1770 struct mapped_device *md = q->queuedata;
cec47e3d
KU
1771 struct request *clone;
1772
cec47e3d
KU
1773 if (unlikely(rq->special)) {
1774 DMWARN("Already has something in rq->special.");
1775 return BLKPREP_KILL;
1776 }
1777
6facdaff
KU
1778 clone = clone_rq(rq, md, GFP_ATOMIC);
1779 if (!clone)
cec47e3d 1780 return BLKPREP_DEFER;
cec47e3d
KU
1781
1782 rq->special = clone;
1783 rq->cmd_flags |= REQ_DONTPREP;
1784
1785 return BLKPREP_OK;
1786}
1787
9eef87da
KU
1788/*
1789 * Returns:
1790 * 0 : the request has been processed (not requeued)
1791 * !0 : the request has been requeued
1792 */
1793static int map_request(struct dm_target *ti, struct request *clone,
1794 struct mapped_device *md)
cec47e3d 1795{
9eef87da 1796 int r, requeued = 0;
cec47e3d
KU
1797 struct dm_rq_target_io *tio = clone->end_io_data;
1798
cec47e3d
KU
1799 tio->ti = ti;
1800 r = ti->type->map_rq(ti, clone, &tio->info);
1801 switch (r) {
1802 case DM_MAPIO_SUBMITTED:
1803 /* The target has taken the I/O to submit by itself later */
1804 break;
1805 case DM_MAPIO_REMAPPED:
1806 /* The target has remapped the I/O so dispatch it */
6db4ccd6
JN
1807 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1808 blk_rq_pos(tio->orig));
cec47e3d
KU
1809 dm_dispatch_request(clone);
1810 break;
1811 case DM_MAPIO_REQUEUE:
1812 /* The target wants to requeue the I/O */
1813 dm_requeue_unmapped_request(clone);
9eef87da 1814 requeued = 1;
cec47e3d
KU
1815 break;
1816 default:
1817 if (r > 0) {
1818 DMWARN("unimplemented target map return value: %d", r);
1819 BUG();
1820 }
1821
1822 /* The target wants to complete the I/O */
1823 dm_kill_unmapped_request(clone, r);
1824 break;
1825 }
9eef87da
KU
1826
1827 return requeued;
cec47e3d
KU
1828}
1829
ba1cbad9
MS
1830static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1831{
1832 struct request *clone;
1833
1834 blk_start_request(orig);
1835 clone = orig->special;
1836 atomic_inc(&md->pending[rq_data_dir(clone)]);
1837
1838 /*
1839 * Hold the md reference here for the in-flight I/O.
 1841 * We can't rely on the reference count taken by the device opener,
1841 * because the device may be closed during the request completion
1842 * when all bios are completed.
1843 * See the comment in rq_completed() too.
1844 */
1845 dm_get(md);
1846
1847 return clone;
1848}
1849
cec47e3d
KU
1850/*
1851 * q->request_fn for request-based dm.
1852 * Called with the queue lock held.
1853 */
1854static void dm_request_fn(struct request_queue *q)
1855{
1856 struct mapped_device *md = q->queuedata;
83d5e5b0
MP
1857 int srcu_idx;
1858 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
cec47e3d 1859 struct dm_target *ti;
b4324fee 1860 struct request *rq, *clone;
29e4013d 1861 sector_t pos;
cec47e3d
KU
1862
1863 /*
b4324fee
KU
1864 * For suspend, check blk_queue_stopped() and increment
 1865 * ->pending within a single queue_lock so as not to increment the
1866 * number of in-flight I/Os after the queue is stopped in
1867 * dm_suspend().
cec47e3d 1868 */
7eaceacc 1869 while (!blk_queue_stopped(q)) {
cec47e3d
KU
1870 rq = blk_peek_request(q);
1871 if (!rq)
7eaceacc 1872 goto delay_and_out;
cec47e3d 1873
29e4013d
TH
1874 /* always use block 0 to find the target for flushes for now */
1875 pos = 0;
1876 if (!(rq->cmd_flags & REQ_FLUSH))
1877 pos = blk_rq_pos(rq);
1878
1879 ti = dm_table_find_target(map, pos);
ba1cbad9
MS
1880 if (!dm_target_is_valid(ti)) {
1881 /*
 1882 * Must perform the setup that dm_done() requires
 1883 * before calling dm_kill_unmapped_request().
1884 */
1885 DMERR_LIMIT("request attempted access beyond the end of device");
1886 clone = dm_start_request(md, rq);
1887 dm_kill_unmapped_request(clone, -EIO);
1888 continue;
1889 }
d0bcb878 1890
cec47e3d 1891 if (ti->type->busy && ti->type->busy(ti))
7eaceacc 1892 goto delay_and_out;
cec47e3d 1893
ba1cbad9 1894 clone = dm_start_request(md, rq);
b4324fee 1895
cec47e3d 1896 spin_unlock(q->queue_lock);
9eef87da
KU
1897 if (map_request(ti, clone, md))
1898 goto requeued;
1899
052189a2
KU
1900 BUG_ON(!irqs_disabled());
1901 spin_lock(q->queue_lock);
cec47e3d
KU
1902 }
1903
1904 goto out;
1905
9eef87da 1906requeued:
052189a2
KU
1907 BUG_ON(!irqs_disabled());
1908 spin_lock(q->queue_lock);
9eef87da 1909
7eaceacc
JA
1910delay_and_out:
1911 blk_delay_queue(q, HZ / 10);
cec47e3d 1912out:
83d5e5b0 1913 dm_put_live_table(md, srcu_idx);
cec47e3d
KU
1914}
1915
1916int dm_underlying_device_busy(struct request_queue *q)
1917{
1918 return blk_lld_busy(q);
1919}
1920EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1921
1922static int dm_lld_busy(struct request_queue *q)
1923{
1924 int r;
1925 struct mapped_device *md = q->queuedata;
83d5e5b0 1926 struct dm_table *map = dm_get_live_table_fast(md);
cec47e3d
KU
1927
1928 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1929 r = 1;
1930 else
1931 r = dm_table_any_busy_target(map);
1932
83d5e5b0 1933 dm_put_live_table_fast(md);
cec47e3d
KU
1934
1935 return r;
1936}
1937
1da177e4
LT
1938static int dm_any_congested(void *congested_data, int bdi_bits)
1939{
8a57dfc6
CS
1940 int r = bdi_bits;
1941 struct mapped_device *md = congested_data;
1942 struct dm_table *map;
1da177e4 1943
1eb787ec 1944 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
83d5e5b0 1945 map = dm_get_live_table_fast(md);
8a57dfc6 1946 if (map) {
cec47e3d
KU
1947 /*
1948 * Request-based dm cares only about its own queue when
1949 * queried for the congestion status of the request_queue.
1950 */
1951 if (dm_request_based(md))
1952 r = md->queue->backing_dev_info.state &
1953 bdi_bits;
1954 else
1955 r = dm_table_any_congested(map, bdi_bits);
8a57dfc6 1956 }
83d5e5b0 1957 dm_put_live_table_fast(md);
8a57dfc6
CS
1958 }
1959
1da177e4
LT
1960 return r;
1961}
1962
1963/*-----------------------------------------------------------------
1964 * An IDR is used to keep track of allocated minor numbers.
1965 *---------------------------------------------------------------*/
2b06cfff 1966static void free_minor(int minor)
1da177e4 1967{
f32c10b0 1968 spin_lock(&_minor_lock);
1da177e4 1969 idr_remove(&_minor_idr, minor);
f32c10b0 1970 spin_unlock(&_minor_lock);
1da177e4
LT
1971}
1972
1973/*
1974 * See if the device with a specific minor # is free.
1975 */
cf13ab8e 1976static int specific_minor(int minor)
1da177e4 1977{
c9d76be6 1978 int r;
1da177e4
LT
1979
1980 if (minor >= (1 << MINORBITS))
1981 return -EINVAL;
1982
c9d76be6 1983 idr_preload(GFP_KERNEL);
f32c10b0 1984 spin_lock(&_minor_lock);
1da177e4 1985
c9d76be6 1986 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1da177e4 1987
f32c10b0 1988 spin_unlock(&_minor_lock);
c9d76be6
TH
1989 idr_preload_end();
1990 if (r < 0)
1991 return r == -ENOSPC ? -EBUSY : r;
1992 return 0;
1da177e4
LT
1993}
1994
cf13ab8e 1995static int next_free_minor(int *minor)
1da177e4 1996{
c9d76be6 1997 int r;
62f75c2f 1998
c9d76be6 1999 idr_preload(GFP_KERNEL);
f32c10b0 2000 spin_lock(&_minor_lock);
1da177e4 2001
c9d76be6 2002 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1da177e4 2003
f32c10b0 2004 spin_unlock(&_minor_lock);
c9d76be6
TH
2005 idr_preload_end();
2006 if (r < 0)
2007 return r;
2008 *minor = r;
2009 return 0;
1da177e4
LT
2010}
2011
83d5cde4 2012static const struct block_device_operations dm_blk_dops;
1da177e4 2013
53d5914f
MP
2014static void dm_wq_work(struct work_struct *work);
2015
4a0b4ddf
MS
2016static void dm_init_md_queue(struct mapped_device *md)
2017{
2018 /*
2019 * Request-based dm devices cannot be stacked on top of bio-based dm
2020 * devices. The type of this dm device has not been decided yet.
2021 * The type is decided at the first table loading time.
2022 * To prevent problematic device stacking, clear the queue flag
2023 * for request stacking support until then.
2024 *
2025 * This queue is new, so no concurrency on the queue_flags.
2026 */
2027 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2028
2029 md->queue->queuedata = md;
2030 md->queue->backing_dev_info.congested_fn = dm_any_congested;
2031 md->queue->backing_dev_info.congested_data = md;
2032 blk_queue_make_request(md->queue, dm_request);
2033 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
4a0b4ddf
MS
2034 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
2035}
2036
1da177e4
LT
2037/*
2038 * Allocate and initialise a blank device with a given minor.
2039 */
2b06cfff 2040static struct mapped_device *alloc_dev(int minor)
1da177e4
LT
2041{
2042 int r;
cf13ab8e 2043 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
ba61fdd1 2044 void *old_md;
1da177e4
LT
2045
2046 if (!md) {
2047 DMWARN("unable to allocate device, out of memory.");
2048 return NULL;
2049 }
2050
10da4f79 2051 if (!try_module_get(THIS_MODULE))
6ed7ade8 2052 goto bad_module_get;
10da4f79 2053
1da177e4 2054 /* get a minor number for the dev */
2b06cfff 2055 if (minor == DM_ANY_MINOR)
cf13ab8e 2056 r = next_free_minor(&minor);
2b06cfff 2057 else
cf13ab8e 2058 r = specific_minor(minor);
1da177e4 2059 if (r < 0)
6ed7ade8 2060 goto bad_minor;
1da177e4 2061
83d5e5b0
MP
2062 r = init_srcu_struct(&md->io_barrier);
2063 if (r < 0)
2064 goto bad_io_barrier;
2065
a5664dad 2066 md->type = DM_TYPE_NONE;
e61290a4 2067 mutex_init(&md->suspend_lock);
a5664dad 2068 mutex_init(&md->type_lock);
86f1152b 2069 mutex_init(&md->table_devices_lock);
022c2611 2070 spin_lock_init(&md->deferred_lock);
1da177e4 2071 atomic_set(&md->holders, 1);
5c6bd75d 2072 atomic_set(&md->open_count, 0);
1da177e4 2073 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
2074 atomic_set(&md->uevent_seq, 0);
2075 INIT_LIST_HEAD(&md->uevent_list);
86f1152b 2076 INIT_LIST_HEAD(&md->table_devices);
7a8c3d3b 2077 spin_lock_init(&md->uevent_lock);
1da177e4 2078
4a0b4ddf 2079 md->queue = blk_alloc_queue(GFP_KERNEL);
1da177e4 2080 if (!md->queue)
6ed7ade8 2081 goto bad_queue;
1da177e4 2082
4a0b4ddf 2083 dm_init_md_queue(md);
9faf400f 2084
1da177e4
LT
2085 md->disk = alloc_disk(1);
2086 if (!md->disk)
6ed7ade8 2087 goto bad_disk;
1da177e4 2088
316d315b
NK
2089 atomic_set(&md->pending[0], 0);
2090 atomic_set(&md->pending[1], 0);
f0b04115 2091 init_waitqueue_head(&md->wait);
53d5914f 2092 INIT_WORK(&md->work, dm_wq_work);
f0b04115 2093 init_waitqueue_head(&md->eventq);
2995fa78 2094 init_completion(&md->kobj_holder.completion);
f0b04115 2095
1da177e4
LT
2096 md->disk->major = _major;
2097 md->disk->first_minor = minor;
2098 md->disk->fops = &dm_blk_dops;
2099 md->disk->queue = md->queue;
2100 md->disk->private_data = md;
2101 sprintf(md->disk->disk_name, "dm-%d", minor);
2102 add_disk(md->disk);
7e51f257 2103 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 2104
670368a8 2105 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
304f3f6a
MB
2106 if (!md->wq)
2107 goto bad_thread;
2108
32a926da
MP
2109 md->bdev = bdget_disk(md->disk, 0);
2110 if (!md->bdev)
2111 goto bad_bdev;
2112
6a8736d1
TH
2113 bio_init(&md->flush_bio);
2114 md->flush_bio.bi_bdev = md->bdev;
2115 md->flush_bio.bi_rw = WRITE_FLUSH;
2116
fd2ed4d2
MP
2117 dm_stats_init(&md->stats);
2118
ba61fdd1 2119 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 2120 spin_lock(&_minor_lock);
ba61fdd1 2121 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 2122 spin_unlock(&_minor_lock);
ba61fdd1
JM
2123
2124 BUG_ON(old_md != MINOR_ALLOCED);
2125
1da177e4
LT
2126 return md;
2127
32a926da
MP
2128bad_bdev:
2129 destroy_workqueue(md->wq);
304f3f6a 2130bad_thread:
03022c54 2131 del_gendisk(md->disk);
304f3f6a 2132 put_disk(md->disk);
6ed7ade8 2133bad_disk:
1312f40e 2134 blk_cleanup_queue(md->queue);
6ed7ade8 2135bad_queue:
83d5e5b0
MP
2136 cleanup_srcu_struct(&md->io_barrier);
2137bad_io_barrier:
1da177e4 2138 free_minor(minor);
6ed7ade8 2139bad_minor:
10da4f79 2140 module_put(THIS_MODULE);
6ed7ade8 2141bad_module_get:
1da177e4
LT
2142 kfree(md);
2143 return NULL;
2144}
2145
ae9da83f
JN
2146static void unlock_fs(struct mapped_device *md);
2147
1da177e4
LT
2148static void free_dev(struct mapped_device *md)
2149{
f331c029 2150 int minor = MINOR(disk_devt(md->disk));
63d94e48 2151
32a926da
MP
2152 unlock_fs(md);
2153 bdput(md->bdev);
304f3f6a 2154 destroy_workqueue(md->wq);
e6ee8c0b
KU
2155 if (md->io_pool)
2156 mempool_destroy(md->io_pool);
2157 if (md->bs)
2158 bioset_free(md->bs);
9c47008d 2159 blk_integrity_unregister(md->disk);
1da177e4 2160 del_gendisk(md->disk);
83d5e5b0 2161 cleanup_srcu_struct(&md->io_barrier);
86f1152b 2162 free_table_devices(&md->table_devices);
63d94e48 2163 free_minor(minor);
fba9f90e
JM
2164
2165 spin_lock(&_minor_lock);
2166 md->disk->private_data = NULL;
2167 spin_unlock(&_minor_lock);
2168
1da177e4 2169 put_disk(md->disk);
1312f40e 2170 blk_cleanup_queue(md->queue);
fd2ed4d2 2171 dm_stats_cleanup(&md->stats);
10da4f79 2172 module_put(THIS_MODULE);
1da177e4
LT
2173 kfree(md);
2174}
2175
e6ee8c0b
KU
2176static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2177{
c0820cf5 2178 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
e6ee8c0b 2179
5f015204 2180 if (md->io_pool && md->bs) {
16245bdc
JN
2181 /* The md already has necessary mempools. */
2182 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2183 /*
2184 * Reload the bioset because front_pad may have changed
2185 * when a different table was loaded.
2186 */
2187 bioset_free(md->bs);
2188 md->bs = p->bs;
2189 p->bs = NULL;
2190 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
16245bdc
JN
2191 /*
2192 * There's no need to reload with request-based dm
2193 * because the size of front_pad doesn't change.
2194 * Note for the future: if the bioset is ever reloaded,
2195 * prepped requests in the queue may still refer to bios
2196 * from the old bioset, so the queue must be walked
2197 * to unprep them.
2198 */
2199 }
e6ee8c0b 2200 goto out;
c0820cf5 2201 }
e6ee8c0b 2202
5f015204 2203 BUG_ON(!p || md->io_pool || md->bs);
e6ee8c0b
KU
2204
2205 md->io_pool = p->io_pool;
2206 p->io_pool = NULL;
e6ee8c0b
KU
2207 md->bs = p->bs;
2208 p->bs = NULL;
2209
2210out:
2211 /* mempool bind completed, the table no longer needs any mempools */
2212 dm_table_free_md_mempools(t);
2213}
2214
1da177e4
LT
2215/*
2216 * Bind a table to the device.
2217 */
2218static void event_callback(void *context)
2219{
7a8c3d3b
MA
2220 unsigned long flags;
2221 LIST_HEAD(uevents);
1da177e4
LT
2222 struct mapped_device *md = (struct mapped_device *) context;
2223
7a8c3d3b
MA
2224 spin_lock_irqsave(&md->uevent_lock, flags);
2225 list_splice_init(&md->uevent_list, &uevents);
2226 spin_unlock_irqrestore(&md->uevent_lock, flags);
2227
ed9e1982 2228 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 2229
1da177e4
LT
2230 atomic_inc(&md->event_nr);
2231 wake_up(&md->eventq);
2232}
2233
c217649b
MS
2234/*
2235 * Protected by md->suspend_lock obtained by dm_swap_table().
2236 */
4e90188b 2237static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 2238{
4e90188b 2239 set_capacity(md->disk, size);
1da177e4 2240
db8fef4f 2241 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1da177e4
LT
2242}
2243
d5b9dd04
MP
2244/*
2245 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2246 *
2247 * If this function returns 0, then the device is either a non-dm
2248 * device without a merge_bvec_fn, or it is a dm device that is
2249 * able to split any bios it receives that are too big.
2250 */
2251int dm_queue_merge_is_compulsory(struct request_queue *q)
2252{
2253 struct mapped_device *dev_md;
2254
2255 if (!q->merge_bvec_fn)
2256 return 0;
2257
2258 if (q->make_request_fn == dm_request) {
2259 dev_md = q->queuedata;
2260 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2261 return 0;
2262 }
2263
2264 return 1;
2265}
2266
2267static int dm_device_merge_is_compulsory(struct dm_target *ti,
2268 struct dm_dev *dev, sector_t start,
2269 sector_t len, void *data)
2270{
2271 struct block_device *bdev = dev->bdev;
2272 struct request_queue *q = bdev_get_queue(bdev);
2273
2274 return dm_queue_merge_is_compulsory(q);
2275}
2276
2277/*
2278 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2279 * on the properties of the underlying devices.
2280 */
2281static int dm_table_merge_is_optional(struct dm_table *table)
2282{
2283 unsigned i = 0;
2284 struct dm_target *ti;
2285
2286 while (i < dm_table_get_num_targets(table)) {
2287 ti = dm_table_get_target(table, i++);
2288
2289 if (ti->type->iterate_devices &&
2290 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2291 return 0;
2292 }
2293
2294 return 1;
2295}
2296
042d2a9b
AK
2297/*
2298 * Returns old map, which caller must destroy.
2299 */
2300static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2301 struct queue_limits *limits)
1da177e4 2302{
042d2a9b 2303 struct dm_table *old_map;
165125e1 2304 struct request_queue *q = md->queue;
1da177e4 2305 sector_t size;
d5b9dd04 2306 int merge_is_optional;
1da177e4
LT
2307
2308 size = dm_table_get_size(t);
3ac51e74
DW
2309
2310 /*
2311 * Wipe any geometry if the size of the table changed.
2312 */
fd2ed4d2 2313 if (size != dm_get_size(md))
3ac51e74
DW
2314 memset(&md->geometry, 0, sizeof(md->geometry));
2315
32a926da 2316 __set_size(md, size);
d5816876 2317
2ca3310e
AK
2318 dm_table_event_callback(t, event_callback, md);
2319
e6ee8c0b
KU
2320 /*
2321 * If the old table type wasn't request-based, the queue hasn't
2322 * been stopped during suspension, so stop it now to prevent
2323 * I/O from being mapped before resume.
2324 * This must be done before setting the queue restrictions,
2325 * because request-based dm may start running right after they are set.
2326 */
2327 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2328 stop_queue(q);
2329
2330 __bind_mempools(md, t);
2331
d5b9dd04
MP
2332 merge_is_optional = dm_table_merge_is_optional(t);
2333
a12f5d48 2334 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
83d5e5b0 2335 rcu_assign_pointer(md->map, t);
36a0456f
AK
2336 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2337
754c5fc7 2338 dm_table_set_restrictions(t, q, limits);
d5b9dd04
MP
2339 if (merge_is_optional)
2340 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2341 else
2342 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
41abc4e1
HR
2343 if (old_map)
2344 dm_sync_table(md);
1da177e4 2345
042d2a9b 2346 return old_map;
1da177e4
LT
2347}
2348
a7940155
AK
2349/*
2350 * Returns unbound table for the caller to free.
2351 */
2352static struct dm_table *__unbind(struct mapped_device *md)
1da177e4 2353{
a12f5d48 2354 struct dm_table *map = rcu_dereference_protected(md->map, 1);
1da177e4
LT
2355
2356 if (!map)
a7940155 2357 return NULL;
1da177e4
LT
2358
2359 dm_table_event_callback(map, NULL, NULL);
9cdb8520 2360 RCU_INIT_POINTER(md->map, NULL);
83d5e5b0 2361 dm_sync_table(md);
a7940155
AK
2362
2363 return map;
1da177e4
LT
2364}
2365
2366/*
2367 * Constructor for a new device.
2368 */
2b06cfff 2369int dm_create(int minor, struct mapped_device **result)
1da177e4
LT
2370{
2371 struct mapped_device *md;
2372
2b06cfff 2373 md = alloc_dev(minor);
1da177e4
LT
2374 if (!md)
2375 return -ENXIO;
2376
784aae73
MB
2377 dm_sysfs_init(md);
2378
1da177e4
LT
2379 *result = md;
2380 return 0;
2381}
2382
a5664dad
MS
2383/*
2384 * Functions to manage md->type.
2385 * All are required to hold md->type_lock.
2386 */
2387void dm_lock_md_type(struct mapped_device *md)
2388{
2389 mutex_lock(&md->type_lock);
2390}
2391
2392void dm_unlock_md_type(struct mapped_device *md)
2393{
2394 mutex_unlock(&md->type_lock);
2395}
2396
2397void dm_set_md_type(struct mapped_device *md, unsigned type)
2398{
00c4fc3b 2399 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2400 md->type = type;
2401}
2402
2403unsigned dm_get_md_type(struct mapped_device *md)
2404{
00c4fc3b 2405 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2406 return md->type;
2407}
2408
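/*
 * Illustrative sketch, not part of this file: the expected use of the
 * md->type_lock helpers above.  The wrapper function is hypothetical;
 * it mirrors how the device type is fixed by the first table load.
 */
static void example_set_type_once(struct mapped_device *md, unsigned new_type)
{
	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, new_type);	/* first table decides the type */
	dm_unlock_md_type(md);
}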
36a0456f
AK
2409struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2410{
2411 return md->immutable_target_type;
2412}
2413
f84cb8a4
MS
2414/*
2415 * The queue_limits are only valid as long as you have a reference
2416 * count on 'md'.
2417 */
2418struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2419{
2420 BUG_ON(!atomic_read(&md->holders));
2421 return &md->queue->limits;
2422}
2423EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2424
4a0b4ddf
MS
2425/*
2426 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2427 */
2428static int dm_init_request_based_queue(struct mapped_device *md)
2429{
2430 struct request_queue *q = NULL;
2431
2432 if (md->queue->elevator)
2433 return 1;
2434
2435 /* Fully initialize the queue */
2436 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2437 if (!q)
2438 return 0;
2439
2440 md->queue = q;
4a0b4ddf
MS
2441 dm_init_md_queue(md);
2442 blk_queue_softirq_done(md->queue, dm_softirq_done);
2443 blk_queue_prep_rq(md->queue, dm_prep_fn);
2444 blk_queue_lld_busy(md->queue, dm_lld_busy);
4a0b4ddf
MS
2445
2446 elv_register_queue(md->queue);
2447
2448 return 1;
2449}
2450
2451/*
2452 * Setup the DM device's queue based on md's type
2453 */
2454int dm_setup_md_queue(struct mapped_device *md)
2455{
2456 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2457 !dm_init_request_based_queue(md)) {
2458 DMWARN("Cannot initialize queue for request-based mapped device");
2459 return -EINVAL;
2460 }
2461
2462 return 0;
2463}
2464
637842cf 2465static struct mapped_device *dm_find_md(dev_t dev)
1da177e4
LT
2466{
2467 struct mapped_device *md;
1da177e4
LT
2468 unsigned minor = MINOR(dev);
2469
2470 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2471 return NULL;
2472
f32c10b0 2473 spin_lock(&_minor_lock);
1da177e4
LT
2474
2475 md = idr_find(&_minor_idr, minor);
fba9f90e 2476 if (md && (md == MINOR_ALLOCED ||
f331c029 2477 (MINOR(disk_devt(dm_disk(md))) != minor) ||
abdc568b 2478 dm_deleting_md(md) ||
17b2f66f 2479 test_bit(DMF_FREEING, &md->flags))) {
637842cf 2480 md = NULL;
fba9f90e
JM
2481 goto out;
2482 }
1da177e4 2483
fba9f90e 2484out:
f32c10b0 2485 spin_unlock(&_minor_lock);
1da177e4 2486
637842cf
DT
2487 return md;
2488}
2489
d229a958
DT
2490struct mapped_device *dm_get_md(dev_t dev)
2491{
2492 struct mapped_device *md = dm_find_md(dev);
2493
2494 if (md)
2495 dm_get(md);
2496
2497 return md;
2498}
3cf2e4ba 2499EXPORT_SYMBOL_GPL(dm_get_md);
d229a958 2500
9ade92a9 2501void *dm_get_mdptr(struct mapped_device *md)
637842cf 2502{
9ade92a9 2503 return md->interface_ptr;
1da177e4
LT
2504}
2505
2506void dm_set_mdptr(struct mapped_device *md, void *ptr)
2507{
2508 md->interface_ptr = ptr;
2509}
2510
2511void dm_get(struct mapped_device *md)
2512{
2513 atomic_inc(&md->holders);
3f77316d 2514 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
2515}
2516
72d94861
AK
2517const char *dm_device_name(struct mapped_device *md)
2518{
2519 return md->name;
2520}
2521EXPORT_SYMBOL_GPL(dm_device_name);
2522
3f77316d 2523static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 2524{
1134e5ae 2525 struct dm_table *map;
83d5e5b0 2526 int srcu_idx;
1da177e4 2527
3f77316d 2528 might_sleep();
fba9f90e 2529
3f77316d 2530 spin_lock(&_minor_lock);
83d5e5b0 2531 map = dm_get_live_table(md, &srcu_idx);
3f77316d
KU
2532 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2533 set_bit(DMF_FREEING, &md->flags);
2534 spin_unlock(&_minor_lock);
2535
2536 if (!dm_suspended_md(md)) {
2537 dm_table_presuspend_targets(map);
2538 dm_table_postsuspend_targets(map);
1da177e4 2539 }
3f77316d 2540
83d5e5b0
MP
2541 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2542 dm_put_live_table(md, srcu_idx);
2543
3f77316d
KU
2544 /*
2545 * Rare, but some I/O requests may still be completing.
2546 * Wait for all references to disappear.
2547 * No one should increment the reference count of the mapped_device
2548 * after its state becomes DMF_FREEING.
2549 */
2550 if (wait)
2551 while (atomic_read(&md->holders))
2552 msleep(1);
2553 else if (atomic_read(&md->holders))
2554 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2555 dm_device_name(md), atomic_read(&md->holders));
2556
2557 dm_sysfs_exit(md);
3f77316d
KU
2558 dm_table_destroy(__unbind(md));
2559 free_dev(md);
2560}
2561
2562void dm_destroy(struct mapped_device *md)
2563{
2564 __dm_destroy(md, true);
2565}
2566
2567void dm_destroy_immediate(struct mapped_device *md)
2568{
2569 __dm_destroy(md, false);
2570}
2571
2572void dm_put(struct mapped_device *md)
2573{
2574 atomic_dec(&md->holders);
1da177e4 2575}
79eb885c 2576EXPORT_SYMBOL_GPL(dm_put);
1da177e4 2577
401600df 2578static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
46125c1c
MB
2579{
2580 int r = 0;
b44ebeb0
MP
2581 DECLARE_WAITQUEUE(wait, current);
2582
b44ebeb0 2583 add_wait_queue(&md->wait, &wait);
46125c1c
MB
2584
2585 while (1) {
401600df 2586 set_current_state(interruptible);
46125c1c 2587
b4324fee 2588 if (!md_in_flight(md))
46125c1c
MB
2589 break;
2590
401600df
MP
2591 if (interruptible == TASK_INTERRUPTIBLE &&
2592 signal_pending(current)) {
46125c1c
MB
2593 r = -EINTR;
2594 break;
2595 }
2596
2597 io_schedule();
2598 }
2599 set_current_state(TASK_RUNNING);
2600
b44ebeb0
MP
2601 remove_wait_queue(&md->wait, &wait);
2602
46125c1c
MB
2603 return r;
2604}
2605
1da177e4
LT
2606/*
2607 * Process the deferred bios
2608 */
ef208587 2609static void dm_wq_work(struct work_struct *work)
1da177e4 2610{
ef208587
MP
2611 struct mapped_device *md = container_of(work, struct mapped_device,
2612 work);
6d6f10df 2613 struct bio *c;
83d5e5b0
MP
2614 int srcu_idx;
2615 struct dm_table *map;
1da177e4 2616
83d5e5b0 2617 map = dm_get_live_table(md, &srcu_idx);
ef208587 2618
3b00b203 2619 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2620 spin_lock_irq(&md->deferred_lock);
2621 c = bio_list_pop(&md->deferred);
2622 spin_unlock_irq(&md->deferred_lock);
2623
6a8736d1 2624 if (!c)
df12ee99 2625 break;
022c2611 2626
e6ee8c0b
KU
2627 if (dm_request_based(md))
2628 generic_make_request(c);
6a8736d1 2629 else
83d5e5b0 2630 __split_and_process_bio(md, map, c);
022c2611 2631 }
73d410c0 2632
83d5e5b0 2633 dm_put_live_table(md, srcu_idx);
1da177e4
LT
2634}
2635
9a1fb464 2636static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2637{
3b00b203 2638 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
4e857c58 2639 smp_mb__after_atomic();
53d5914f 2640 queue_work(md->wq, &md->work);
304f3f6a
MB
2641}
2642
1da177e4 2643/*
042d2a9b 2644 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2645 */
042d2a9b 2646struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2647{
87eb5b21 2648 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
754c5fc7 2649 struct queue_limits limits;
042d2a9b 2650 int r;
1da177e4 2651
e61290a4 2652 mutex_lock(&md->suspend_lock);
1da177e4
LT
2653
2654 /* device must be suspended */
4f186f8b 2655 if (!dm_suspended_md(md))
93c534ae 2656 goto out;
1da177e4 2657
3ae70656
MS
2658 /*
2659 * If the new table has no data devices, retain the existing limits.
2660 * This helps multipath with queue_if_no_path if all paths disappear,
2661 * then new I/O is queued based on these limits, and then some paths
2662 * reappear.
2663 */
2664 if (dm_table_has_no_data_devices(table)) {
83d5e5b0 2665 live_map = dm_get_live_table_fast(md);
3ae70656
MS
2666 if (live_map)
2667 limits = md->queue->limits;
83d5e5b0 2668 dm_put_live_table_fast(md);
3ae70656
MS
2669 }
2670
87eb5b21
MC
2671 if (!live_map) {
2672 r = dm_calculate_queue_limits(table, &limits);
2673 if (r) {
2674 map = ERR_PTR(r);
2675 goto out;
2676 }
042d2a9b 2677 }
754c5fc7 2678
042d2a9b 2679 map = __bind(md, table, &limits);
1da177e4 2680
93c534ae 2681out:
e61290a4 2682 mutex_unlock(&md->suspend_lock);
042d2a9b 2683 return map;
1da177e4
LT
2684}
2685
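/*
 * Illustrative sketch, not part of this file: the ordering a caller uses
 * around dm_swap_table() above -- suspend, swap, destroy the returned old
 * map, resume.  Error handling for dm_suspend() is omitted and the wrapper
 * name is hypothetical.
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;

	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}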
2686/*
2687 * Functions to lock and unlock any filesystem running on the
2688 * device.
2689 */
2ca3310e 2690static int lock_fs(struct mapped_device *md)
1da177e4 2691{
e39e2e95 2692 int r;
1da177e4
LT
2693
2694 WARN_ON(md->frozen_sb);
dfbe03f6 2695
db8fef4f 2696 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2697 if (IS_ERR(md->frozen_sb)) {
cf222b37 2698 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2699 md->frozen_sb = NULL;
2700 return r;
dfbe03f6
AK
2701 }
2702
aa8d7c2f
AK
2703 set_bit(DMF_FROZEN, &md->flags);
2704
1da177e4
LT
2705 return 0;
2706}
2707
2ca3310e 2708static void unlock_fs(struct mapped_device *md)
1da177e4 2709{
aa8d7c2f
AK
2710 if (!test_bit(DMF_FROZEN, &md->flags))
2711 return;
2712
db8fef4f 2713 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2714 md->frozen_sb = NULL;
aa8d7c2f 2715 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2716}
2717
2718/*
ffcc3936
MS
2719 * If __dm_suspend returns 0, the device is completely quiescent
2720 * now. There is no request-processing activity. All new requests
2721 * are being added to md->deferred list.
cec47e3d 2722 *
ffcc3936 2723 * Caller must hold md->suspend_lock
cec47e3d 2724 */
ffcc3936
MS
2725static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2726 unsigned suspend_flags, int interruptible)
1da177e4 2727{
ffcc3936
MS
2728 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2729 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2730 int r;
1da177e4 2731
2e93ccc1
KU
2732 /*
2733 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2734 * This flag is cleared before dm_suspend returns.
2735 */
2736 if (noflush)
2737 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2738
d67ee213
MS
2739 /*
2740 * This gets reverted if there's an error later and the targets
2741 * provide the .presuspend_undo hook.
2742 */
cf222b37
AK
2743 dm_table_presuspend_targets(map);
2744
32a926da 2745 /*
9f518b27
KU
2746 * Flush I/O to the device.
2747 * Any I/O submitted after lock_fs() may not be flushed.
2748 * noflush takes precedence over do_lockfs.
2749 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2750 */
2751 if (!noflush && do_lockfs) {
2752 r = lock_fs(md);
d67ee213
MS
2753 if (r) {
2754 dm_table_presuspend_undo_targets(map);
ffcc3936 2755 return r;
d67ee213 2756 }
aa8d7c2f 2757 }
1da177e4
LT
2758
2759 /*
3b00b203
MP
2760 * Here we must make sure that no processes are submitting requests
2761 * to target drivers i.e. no one may be executing
2762 * __split_and_process_bio. This is called from dm_request and
2763 * dm_wq_work.
2764 *
2765 * To get all processes out of __split_and_process_bio in dm_request,
2766 * we take the write lock. To prevent any process from reentering
6a8736d1
TH
2767 * __split_and_process_bio from dm_request and quiesce the thread
2768 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2769 * flush_workqueue(md->wq).
1da177e4 2770 */
1eb787ec 2771 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
41abc4e1
HR
2772 if (map)
2773 synchronize_srcu(&md->io_barrier);
1da177e4 2774
d0bcb878 2775 /*
29e4013d
TH
2776 * Stop md->queue before flushing md->wq in case request-based
2777 * dm defers requests to md->wq from md->queue.
d0bcb878 2778 */
cec47e3d 2779 if (dm_request_based(md))
9f518b27 2780 stop_queue(md->queue);
cec47e3d 2781
d0bcb878
KU
2782 flush_workqueue(md->wq);
2783
1da177e4 2784 /*
3b00b203
MP
2785 * At this point no more requests are entering target request routines.
2786 * We call dm_wait_for_completion to wait for all existing requests
2787 * to finish.
1da177e4 2788 */
ffcc3936 2789 r = dm_wait_for_completion(md, interruptible);
1da177e4 2790
6d6f10df 2791 if (noflush)
022c2611 2792 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
41abc4e1
HR
2793 if (map)
2794 synchronize_srcu(&md->io_barrier);
2e93ccc1 2795
1da177e4 2796 /* were we interrupted ? */
46125c1c 2797 if (r < 0) {
9a1fb464 2798 dm_queue_flush(md);
73d410c0 2799
cec47e3d 2800 if (dm_request_based(md))
9f518b27 2801 start_queue(md->queue);
cec47e3d 2802
2ca3310e 2803 unlock_fs(md);
d67ee213 2804 dm_table_presuspend_undo_targets(map);
ffcc3936 2805 /* pushback list is already flushed, so skip flush */
2ca3310e 2806 }
1da177e4 2807
ffcc3936
MS
2808 return r;
2809}
2810
2811/*
2812 * We need to be able to change a mapping table under a mounted
2813 * filesystem. For example we might want to move some data in
2814 * the background. Before the table can be swapped with
2815 * dm_bind_table, dm_suspend must be called to flush any in
2816 * flight bios and ensure that any further io gets deferred.
2817 */
2818/*
2819 * Suspend mechanism in request-based dm.
2820 *
2821 * 1. Flush all I/Os by lock_fs() if needed.
2822 * 2. Stop dispatching any I/O by stopping the request_queue.
2823 * 3. Wait for all in-flight I/Os to be completed or requeued.
2824 *
2825 * To abort suspend, start the request_queue.
2826 */
2827int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2828{
2829 struct dm_table *map = NULL;
2830 int r = 0;
2831
2832retry:
2833 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2834
2835 if (dm_suspended_md(md)) {
2836 r = -EINVAL;
2837 goto out_unlock;
2838 }
2839
2840 if (dm_suspended_internally_md(md)) {
2841 /* already internally suspended, wait for internal resume */
2842 mutex_unlock(&md->suspend_lock);
2843 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2844 if (r)
2845 return r;
2846 goto retry;
2847 }
2848
a12f5d48 2849 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936
MS
2850
2851 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
2852 if (r)
2853 goto out_unlock;
3b00b203 2854
2ca3310e 2855 set_bit(DMF_SUSPENDED, &md->flags);
b84b0287 2856
4d4471cb
KU
2857 dm_table_postsuspend_targets(map);
2858
d287483d 2859out_unlock:
e61290a4 2860 mutex_unlock(&md->suspend_lock);
cf222b37 2861 return r;
1da177e4
LT
2862}
2863
ffcc3936
MS
2864static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2865{
2866 if (map) {
2867 int r = dm_table_resume_targets(map);
2868 if (r)
2869 return r;
2870 }
2871
2872 dm_queue_flush(md);
2873
2874 /*
2875 * Flushing deferred I/Os must be done after targets are resumed
2876 * so that mapping of targets can work correctly.
2877 * Request-based dm is queueing the deferred I/Os in its request_queue.
2878 */
2879 if (dm_request_based(md))
2880 start_queue(md->queue);
2881
2882 unlock_fs(md);
2883
2884 return 0;
2885}
2886
1da177e4
LT
2887int dm_resume(struct mapped_device *md)
2888{
cf222b37 2889 int r = -EINVAL;
cf222b37 2890 struct dm_table *map = NULL;
1da177e4 2891
ffcc3936
MS
2892retry:
2893 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2894
4f186f8b 2895 if (!dm_suspended_md(md))
cf222b37 2896 goto out;
cf222b37 2897
ffcc3936
MS
2898 if (dm_suspended_internally_md(md)) {
2899 /* already internally suspended, wait for internal resume */
2900 mutex_unlock(&md->suspend_lock);
2901 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2902 if (r)
2903 return r;
2904 goto retry;
2905 }
2906
a12f5d48 2907 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2ca3310e 2908 if (!map || !dm_table_get_size(map))
cf222b37 2909 goto out;
1da177e4 2910
ffcc3936 2911 r = __dm_resume(md, map);
8757b776
MB
2912 if (r)
2913 goto out;
2ca3310e 2914
2ca3310e
AK
2915 clear_bit(DMF_SUSPENDED, &md->flags);
2916
cf222b37
AK
2917 r = 0;
2918out:
e61290a4 2919 mutex_unlock(&md->suspend_lock);
2ca3310e 2920
cf222b37 2921 return r;
1da177e4
LT
2922}
2923
fd2ed4d2
MP
2924/*
2925 * Internal suspend/resume works like userspace-driven suspend. It waits
2926 * until all bios finish and prevents issuing new bios to the target drivers.
2927 * It may be used only from the kernel.
fd2ed4d2
MP
2928 */
2929
ffcc3936 2930static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
fd2ed4d2 2931{
ffcc3936
MS
2932 struct dm_table *map = NULL;
2933
96b26c8c 2934 if (md->internal_suspend_count++)
ffcc3936
MS
2935 return; /* nested internal suspend */
2936
2937 if (dm_suspended_md(md)) {
2938 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2939 return; /* nest suspend */
2940 }
2941
a12f5d48 2942 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936
MS
2943
2944 /*
2945 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2946 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2947 * would require changing .presuspend to return an error -- avoid this
2948 * until there is a need for more elaborate variants of internal suspend.
2949 */
2950 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
2951
2952 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2953
2954 dm_table_postsuspend_targets(map);
2955}
2956
2957static void __dm_internal_resume(struct mapped_device *md)
2958{
96b26c8c
MP
2959 BUG_ON(!md->internal_suspend_count);
2960
2961 if (--md->internal_suspend_count)
ffcc3936
MS
2962 return; /* resume from nested internal suspend */
2963
fd2ed4d2 2964 if (dm_suspended_md(md))
ffcc3936
MS
2965 goto done; /* resume from nested suspend */
2966
2967 /*
2968 * NOTE: existing callers don't need to call dm_table_resume_targets
2969 * (which may fail -- so best to avoid it for now by passing NULL map)
2970 */
2971 (void) __dm_resume(md, NULL);
2972
2973done:
2974 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2975 smp_mb__after_atomic();
2976 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2977}
2978
2979void dm_internal_suspend_noflush(struct mapped_device *md)
2980{
2981 mutex_lock(&md->suspend_lock);
2982 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2983 mutex_unlock(&md->suspend_lock);
2984}
2985EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2986
2987void dm_internal_resume(struct mapped_device *md)
2988{
2989 mutex_lock(&md->suspend_lock);
2990 __dm_internal_resume(md);
2991 mutex_unlock(&md->suspend_lock);
2992}
2993EXPORT_SYMBOL_GPL(dm_internal_resume);
2994
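/*
 * Illustrative sketch, not part of this file: the nesting behaviour
 * provided by md->internal_suspend_count above.  Only the outermost
 * suspend actually quiesces the device, and only the matching final
 * resume restarts it; the inner pair is pure bookkeeping.
 */
static void example_nested_internal_suspend(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);	/* count 0 -> 1: device quiesced */
	dm_internal_suspend_noflush(md);	/* count 1 -> 2: no further work */
	dm_internal_resume(md);			/* count 2 -> 1: still suspended */
	dm_internal_resume(md);			/* count 1 -> 0: device resumes */
}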
2995/*
2996 * Fast variants of internal suspend/resume hold md->suspend_lock,
2997 * which prevents interaction with userspace-driven suspend.
2998 */
2999
3000void dm_internal_suspend_fast(struct mapped_device *md)
3001{
3002 mutex_lock(&md->suspend_lock);
3003 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
3004 return;
3005
3006 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3007 synchronize_srcu(&md->io_barrier);
3008 flush_workqueue(md->wq);
3009 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3010}
3011
ffcc3936 3012void dm_internal_resume_fast(struct mapped_device *md)
fd2ed4d2 3013{
ffcc3936 3014 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
3015 goto done;
3016
3017 dm_queue_flush(md);
3018
3019done:
3020 mutex_unlock(&md->suspend_lock);
3021}
3022
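/*
 * Illustrative sketch, not part of this file: the fast variants above are
 * used as a pair around a short critical section.  dm_internal_suspend_fast()
 * returns with md->suspend_lock still held and dm_internal_resume_fast()
 * releases it, so userspace-driven suspend cannot run in between.
 */
static void example_fast_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);
	/* ... inspect or modify state that must not race with I/O ... */
	dm_internal_resume_fast(md);
}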
1da177e4
LT
3023/*-----------------------------------------------------------------
3024 * Event notification.
3025 *---------------------------------------------------------------*/
3abf85b5 3026int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 3027 unsigned cookie)
69267a30 3028{
60935eb2
MB
3029 char udev_cookie[DM_COOKIE_LENGTH];
3030 char *envp[] = { udev_cookie, NULL };
3031
3032 if (!cookie)
3abf85b5 3033 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
3034 else {
3035 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3036 DM_COOKIE_ENV_VAR_NAME, cookie);
3abf85b5
PR
3037 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
3038 action, envp);
60935eb2 3039 }
69267a30
AK
3040}
3041
7a8c3d3b
MA
3042uint32_t dm_next_uevent_seq(struct mapped_device *md)
3043{
3044 return atomic_add_return(1, &md->uevent_seq);
3045}
3046
1da177e4
LT
3047uint32_t dm_get_event_nr(struct mapped_device *md)
3048{
3049 return atomic_read(&md->event_nr);
3050}
3051
3052int dm_wait_event(struct mapped_device *md, int event_nr)
3053{
3054 return wait_event_interruptible(md->eventq,
3055 (event_nr != atomic_read(&md->event_nr)));
3056}
3057
7a8c3d3b
MA
3058void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3059{
3060 unsigned long flags;
3061
3062 spin_lock_irqsave(&md->uevent_lock, flags);
3063 list_add(elist, &md->uevent_list);
3064 spin_unlock_irqrestore(&md->uevent_lock, flags);
3065}
3066
1da177e4
LT
3067/*
3068 * The gendisk is only valid as long as you have a reference
3069 * count on 'md'.
3070 */
3071struct gendisk *dm_disk(struct mapped_device *md)
3072{
3073 return md->disk;
3074}
3075
784aae73
MB
3076struct kobject *dm_kobject(struct mapped_device *md)
3077{
2995fa78 3078 return &md->kobj_holder.kobj;
784aae73
MB
3079}
3080
784aae73
MB
3081struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3082{
3083 struct mapped_device *md;
3084
2995fa78 3085 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
784aae73 3086
4d89b7b4 3087 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 3088 dm_deleting_md(md))
4d89b7b4
MB
3089 return NULL;
3090
784aae73
MB
3091 dm_get(md);
3092 return md;
3093}
3094
4f186f8b 3095int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
3096{
3097 return test_bit(DMF_SUSPENDED, &md->flags);
3098}
3099
ffcc3936
MS
3100int dm_suspended_internally_md(struct mapped_device *md)
3101{
3102 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3103}
3104
2c140a24
MP
3105int dm_test_deferred_remove_flag(struct mapped_device *md)
3106{
3107 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3108}
3109
64dbce58
KU
3110int dm_suspended(struct dm_target *ti)
3111{
ecdb2e25 3112 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
3113}
3114EXPORT_SYMBOL_GPL(dm_suspended);
3115
2e93ccc1
KU
3116int dm_noflush_suspending(struct dm_target *ti)
3117{
ecdb2e25 3118 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
3119}
3120EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3121
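/*
 * Illustrative sketch, not part of this file: a hypothetical target
 * completion policy using dm_noflush_suspending() above to hold back
 * failed I/O during a noflush suspend rather than failing it upward.
 * DM_ENDIO_REQUEUE is the existing return code that asks the core to
 * requeue; everything else here is made up for illustration.
 */
static int example_error_policy(struct dm_target *ti, int error)
{
	if (error && dm_noflush_suspending(ti))
		return DM_ENDIO_REQUEUE;	/* retry once the device resumes */
	return error;
}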
c0820cf5 3122struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
e6ee8c0b 3123{
5f015204
JN
3124 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3125 struct kmem_cache *cachep;
3126 unsigned int pool_size;
3127 unsigned int front_pad;
e6ee8c0b
KU
3128
3129 if (!pools)
3130 return NULL;
3131
23e5083b 3132 if (type == DM_TYPE_BIO_BASED) {
5f015204 3133 cachep = _io_cache;
e8603136 3134 pool_size = dm_get_reserved_bio_based_ios();
5f015204
JN
3135 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3136 } else if (type == DM_TYPE_REQUEST_BASED) {
3137 cachep = _rq_tio_cache;
f4790826 3138 pool_size = dm_get_reserved_rq_based_ios();
5f015204
JN
3139 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3140 /* per_bio_data_size is not used. See __bind_mempools(). */
3141 WARN_ON(per_bio_data_size != 0);
3142 } else
3143 goto out;
e6ee8c0b 3144
6cfa5857 3145 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
5f015204
JN
3146 if (!pools->io_pool)
3147 goto out;
e6ee8c0b 3148
3d8aab2d 3149 pools->bs = bioset_create_nobvec(pool_size, front_pad);
e6ee8c0b 3150 if (!pools->bs)
5f015204 3151 goto out;
e6ee8c0b 3152
a91a2785 3153 if (integrity && bioset_integrity_create(pools->bs, pool_size))
5f015204 3154 goto out;
a91a2785 3155
e6ee8c0b
KU
3156 return pools;
3157
5f015204
JN
3158out:
3159 dm_free_md_mempools(pools);
e6ee8c0b
KU
3160
3161 return NULL;
3162}
3163
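/*
 * Illustrative sketch, not part of this file: what the front_pad chosen in
 * dm_alloc_md_mempools() above makes possible.  The bioset reserves
 * front_pad bytes in front of every bio it allocates, and because 'clone'
 * sits at exactly that offset inside the per-bio structure, the containing
 * structure can be recovered from the bio pointer alone.  Shown for the
 * request-based case; the helper name is hypothetical.
 */
static struct dm_rq_clone_bio_info *example_info_from_bio(struct bio *clone)
{
	return container_of(clone, struct dm_rq_clone_bio_info, clone);
}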
3164void dm_free_md_mempools(struct dm_md_mempools *pools)
3165{
3166 if (!pools)
3167 return;
3168
3169 if (pools->io_pool)
3170 mempool_destroy(pools->io_pool);
3171
e6ee8c0b
KU
3172 if (pools->bs)
3173 bioset_free(pools->bs);
3174
3175 kfree(pools);
3176}
3177
83d5cde4 3178static const struct block_device_operations dm_blk_dops = {
1da177e4
LT
3179 .open = dm_blk_open,
3180 .release = dm_blk_close,
aa129a22 3181 .ioctl = dm_blk_ioctl,
3ac51e74 3182 .getgeo = dm_blk_getgeo,
1da177e4
LT
3183 .owner = THIS_MODULE
3184};
3185
1da177e4
LT
3186/*
3187 * module hooks
3188 */
3189module_init(dm_init);
3190module_exit(dm_exit);
3191
3192module_param(major, uint, 0);
3193MODULE_PARM_DESC(major, "The major number of the device mapper");
f4790826 3194
e8603136
MS
3195module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3196MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3197
f4790826
MS
3198module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3199MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3200
1da177e4
LT
3201MODULE_DESCRIPTION(DM_NAME " driver");
3202MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3203MODULE_LICENSE("GPL");