 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 * This file is released under the GPL.

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
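/*
 * Note: DM_PG_INIT_DELAY_DEFAULT acts as an "unset" marker; when the user
 * has not configured pg_init_delay_msecs, __pg_init_all_paths() falls back
 * to the hard-coded DM_PG_INIT_DELAY_MSECS value instead.
 */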
	struct list_head list;
	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */
	struct delayed_work activate_path;

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
struct priority_group {
	struct list_head list;
	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;
	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

/* Multipath context */
	struct list_head list;
	const char *hw_handler_name;
	char *hw_handler_params;
	unsigned nr_priority_groups;
	struct list_head priority_groups;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */
	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	struct work_struct trigger_event;
	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;
	struct mutex work_mutex;

 * Context information attached to each bio we process.
	struct pgpath *pgpath;

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static int __pgpath_busy(struct pgpath *pgpath);

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
	pgpath->is_active = 1;
	INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);

static void free_pgpath(struct pgpath *pgpath)

static struct priority_group *alloc_priority_group(void)
	struct priority_group *pg;
	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	INIT_LIST_HEAD(&pg->pgpaths);

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
	struct pgpath *pgpath, *tmp;
	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
	struct path_selector *ps = &pg->ps;
	ps->type->destroy(ps);
	dm_put_path_selector(ps->type);
	free_pgpaths(&pg->pgpaths, ti);

static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
	m = kzalloc(sizeof(*m), GFP_KERNEL);
	INIT_LIST_HEAD(&m->priority_groups);
	spin_lock_init(&m->lock);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	INIT_WORK(&m->trigger_event, trigger_event);
	init_waitqueue_head(&m->pg_init_wait);
	mutex_init(&m->work_mutex);
	unsigned min_ios = dm_get_reserved_rq_based_ios();
	m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);

static void free_multipath(struct multipath *m)
	struct priority_group *pg, *tmp;
	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		free_priority_group(pg, m->ti);
	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);

static struct dm_mpath_io *get_mpio(union map_info *info)

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
	struct dm_mpath_io *mpio;
	/* Use blk-mq pdu memory requested via per_io_data_size */
	mpio = get_mpio(info);
	memset(mpio, 0, sizeof(*mpio));
	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	memset(mpio, 0, sizeof(*mpio));

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
	/* Only needed for non blk-mq (.request_fn) multipath */
	struct dm_mpath_io *mpio = info->ptr;
	mempool_free(mpio, m->mpio_pool);

/*-----------------------------------------------
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;
	if (m->pg_init_in_progress || m->pg_init_disabled)
	m->pg_init_required = 0;
	/* Check here to reset pg_init_required */
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
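	/*
	 * pg_init_delay now holds the retry delay: the user-configured value
	 * if one was given, otherwise the 2000 ms default.
	 */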
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
			m->pg_init_in_progress++;
	return m->pg_init_in_progress;

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
	m->current_pg = pgpath->pg;
	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->pg_init_required = 0;
	m->pg_init_count = 0;

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
	struct dm_path *path;
	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	m->current_pgpath = path_to_pgpath(path);
	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
	struct priority_group *pg;
	unsigned bypassed = 1;
	if (!m->nr_valid_paths) {
	/* Were we instructed to switch PG? */
		if (!__choose_path_in_pg(m, pg, nr_bytes))
	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				m->pg_init_delay_retry = 1;
	} while (bypassed--);
	m->current_pgpath = NULL;
	m->current_pg = NULL;

 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 * m->lock must be held on entry.
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
static int __must_push_back(struct multipath *m)
	return (m->queue_if_no_path ||
		(m->queue_if_no_path != m->saved_queue_if_no_path &&
		 dm_noflush_suspending(m->ti)));

 * Map cloned requests
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
	struct multipath *m = (struct multipath *) ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;
	spin_lock_irq(&m->lock);
	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);
	pgpath = m->current_pgpath;
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
	} else if (m->queue_io || m->pg_init_required) {
		__pg_init_all_paths(m);
	mpio = set_mpio(m, map_context);
		/* ENOMEM, requeue */
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;
	bdev = pgpath->path.dev->bdev;
	spin_unlock_irq(&m->lock);
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
				rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
	return DM_MAPIO_REMAPPED;
	spin_unlock_irq(&m->lock);

static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
	return __multipath_map(ti, clone, map_context, NULL, NULL);

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
	return __multipath_map(ti, NULL, map_context, rq, clone);

static void multipath_release_clone(struct request *clone)
	blk_mq_free_request(clone);

 * If we run out of usable paths, should we queue I/O or error it?
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
	spin_lock_irqsave(&m->lock, flags);
		m->saved_queue_if_no_path = m->queue_if_no_path;
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
static void trigger_event(struct work_struct *work)
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);
	dm_table_event(m->ti->table);

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <initial priority group>
 * [<selector> <#selector args> [<arg>]*
 * <#paths> <#per-path selector args>
 * [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
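/*
 * Purely illustrative example of a table line in the above format (the
 * device numbers, sizes and repeat counts below are made up):
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 2 1 \
 *     round-robin 0 2 1 8:16 1000 8:32 1000 \
 *     round-robin 0 1 1 8:48 1000
 *
 * i.e. one feature arg, no hardware handler, two round-robin priority
 * groups (the first used initially) with two paths and one path
 * respectively.
 */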
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
	struct path_selector_type *pst;
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	pst = dm_get_path_selector(dm_shift_arg(as));
		ti->error = "unknown path selector type";
	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
		dm_put_path_selector(pst);
	r = pst->create(&pg->ps, ps_argc, as->argv);
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
	dm_consume_args(as, ps_argc);

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;
	/* we need at least a path arg */
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-ENOMEM);
	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
		ti->error = "error getting device";
	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);
	if (m->retain_attached_hw_handler) {
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;
			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
				ti->error = "unable to set hardware "
					    "handler parameters";
				dm_put_device(ti, p->path.dev);
	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
		dm_put_device(ti, p->path.dev);

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	pg = alloc_priority_group();
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	r = parse_path_selector(as, pg, ti);
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;
		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
		path_args.argc = nr_args;
		path_args.argv = as->argv;
		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	free_priority_group(pg, ti);

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
	struct dm_target *ti = m->ti;
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
			ti->error = "memory allocation failed";
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
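		/*
		 * Illustrative note: hw_handler_params ends up holding the
		 * parameter count followed by each parameter, all
		 * '\0'-separated, e.g. "2\0opt1\0opt2" for two parameters
		 * (parameter names here are made up).
		 */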
	dm_consume_args(as, hw_argc - 1);
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;

static int parse_features(struct dm_arg_set *as, struct multipath *m)
	struct dm_target *ti = m->ti;
	const char *arg_name;
	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	r = dm_read_arg_group(_args, as, &argc, &ti->error);
		arg_name = dm_shift_arg(as);
		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
		if (!strcasecmp(arg_name, "pg_init_retries") &&
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
		ti->error = "Unrecognised multipath feature request";
	} while (argc && !r);

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
	m = alloc_multipath(ti, use_blk_mq);
		ti->error = "can't allocate multipath";
	r = parse_features(&as, m);
	r = parse_hw_handler(&as, m);
	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
	/* parse the priority groups */
		struct priority_group *pg;
		pg = parse_priority_group(&as, m);
		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg->pg_num = pg_count;
	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

static void multipath_wait_for_pg_init_completion(struct multipath *m)
	DECLARE_WAITQUEUE(wait, current);
	add_wait_queue(&m->pg_init_wait, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
		spin_unlock_irqrestore(&m->lock, flags);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&m->pg_init_wait, &wait);

static void flush_multipath_work(struct multipath *m)
	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);
	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);

static void multipath_dtr(struct dm_target *ti)
	struct multipath *m = ti->private;
	flush_multipath_work(m);

 * Take a path out of use.
static int fail_path(struct pgpath *pgpath)
	struct multipath *m = pgpath->pg->m;
	spin_lock_irqsave(&m->lock, flags);
	if (!pgpath->is_active)
	DMWARN("Failing path %s.", pgpath->path.dev->name);
	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;
	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;
	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);
	schedule_work(&m->trigger_event);
	spin_unlock_irqrestore(&m->lock, flags);

 * Reinstate a previously-failed path
static int reinstate_path(struct pgpath *pgpath)
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	spin_lock_irqsave(&m->lock, flags);
	if (pgpath->is_active)
	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 1;
	if (!m->nr_valid_paths++) {
		m->current_pgpath = NULL;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);
	schedule_work(&m->trigger_event);
	spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);

 * Fail or reinstate all paths that match the provided struct dm_dev.
static int action_dev(struct multipath *m, struct dm_dev *dev,
	struct pgpath *pgpath;
	struct priority_group *pg;
	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)

 * Temporarily try to avoid having to use the specified PG
static void bypass_pg(struct multipath *m, struct priority_group *pg,
	unsigned long flags;
	spin_lock_irqsave(&m->lock, flags);
	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);
	schedule_work(&m->trigger_event);

 * Switch to using the specified PG from the next I/O that gets mapped
static int switch_pg_num(struct multipath *m, const char *pgstr)
	struct priority_group *pg;
	unsigned long flags;
	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);
	schedule_work(&m->trigger_event);

 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
	struct priority_group *pg;
	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
	list_for_each_entry(pg, &m->priority_groups, list) {
	bypass_pg(m, pg, bypassed);

 * Should we retry pg_init immediately?
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
	unsigned long flags;
	int limit_reached = 0;
	spin_lock_irqsave(&m->lock, flags);
	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	spin_unlock_irqrestore(&m->lock, flags);
	return limit_reached;

static void pg_init_done(void *data, int errors)
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;
	/* device or driver problems */
		if (!m->hw_handler_name) {
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		 * Fail path for now, so we do not ping pong
	case SCSI_DH_DEV_TEMP_BUSY:
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		bypass_pg(m, pg, 1);
		/* Wait before retrying. */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
	spin_lock_irqsave(&m->lock, flags);
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
	} else if (!m->pg_init_required)
	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
	if (m->pg_init_required) {
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
	 * Wake up any thread waiting to suspend.
	wake_up(&m->pg_init_wait);
	spin_unlock_irqrestore(&m->lock, flags);

static void activate_path(struct work_struct *work)
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);
	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);

static int noretry_error(int error)
	/* Anything else could be a path failure, so should be retried */

static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones. If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 * don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;
	if (!error && !clone->errors)
		return 0;	/* I/O complete */
	if (noretry_error(error))
		fail_path(mpio->pgpath);
	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
			if (error == -EBADE)
	spin_unlock_irqrestore(&m->lock, flags);

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	clear_request_fn_mpio(m, map_context);

 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
static void multipath_presuspend(struct dm_target *ti)
	struct multipath *m = (struct multipath *) ti->private;
	queue_if_no_path(m, 0, 1);

static void multipath_postsuspend(struct dm_target *ti)
	struct multipath *m = ti->private;
	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);

 * Restore the queue_if_no_path setting.
static void multipath_resume(struct dm_target *ti)
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;
	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);

 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 * [A|D|E num_ps_status_args [ps_status_args]*
 * num_paths num_selector_args
 * [path_dev A|F fail_count [selector_args]* ]+ ]+
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 * [priority selector-name num_ps_args [ps_args]*
 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	spin_lock_irqsave(&m->lock, flags);
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      m->retain_attached_hw_handler);
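		/*
		 * Each feature contributes its own argument count to the
		 * total emitted above: queue_if_no_path and
		 * retain_attached_hw_handler are single keywords (1 arg
		 * each), while pg_init_retries and pg_init_delay_msecs each
		 * emit a keyword plus a value (2 args each), matching the
		 * DMEMITs below.
		 */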
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->retain_attached_hw_handler)
			DMEMIT("retain_attached_hw_handler ");
	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("1 %s ", m->hw_handler_name);
	DMEMIT("%u ", m->nr_priority_groups);
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
		pg_num = (m->nr_priority_groups ? 1 : 0);
	DMEMIT("%u ", pg_num);
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
				state = 'E';	/* Enabled */
			DMEMIT("%c ", state);
			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
						&p->path, type, result + sz,
	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);
			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
						&p->path, type, result + sz,
	spin_unlock_irqrestore(&m->lock, flags);

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
	struct multipath *m = (struct multipath *) ti->private;
	mutex_lock(&m->work_mutex);
	if (dm_suspended(ti)) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
		DMWARN("message: error getting device %s",
	r = action_dev(m, dev, action);
	dm_put_device(ti, dev);
	mutex_unlock(&m->work_mutex);

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
	struct multipath *m = ti->private;
	unsigned long flags;
	spin_lock_irqsave(&m->lock, flags);
	if (!m->current_pgpath)
		__choose_pgpath(m, 0);
	if (m->current_pgpath) {
		*bdev = m->current_pgpath->path.dev->bdev;
		*mode = m->current_pgpath->path.dev->mode;
		/* pg_init has not started or completed */
		/* No path is available */
		if (m->queue_if_no_path)
	spin_unlock_irqrestore(&m->lock, flags);
	if (r == -ENOTCONN) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		if (m->pg_init_required)
			__pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
	 * Only pass ioctls through if the device sizes match exactly.
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
	struct multipath *m = ti->private;
	struct priority_group *pg;
	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);

static int __pgpath_busy(struct pgpath *pgpath)
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
	return blk_lld_busy(q);

 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
static int multipath_busy(struct dm_target *ti)
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;
	spin_lock_irqsave(&m->lock, flags);
	/* pg_init in progress or no paths available */
	if (m->pg_init_in_progress ||
	    (!m->nr_valid_paths && m->queue_if_no_path)) {
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
	else if (likely(m->current_pg))
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			if (!__pgpath_busy(pgpath)) {
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
	spin_unlock_irqrestore(&m->lock, flags);

/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,

static int __init dm_multipath_init(void)
	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	r = dm_register_target(&multipath_target);
		DMERR("register failed %d", r);
		goto bad_register_target;
	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
		DMERR("failed to create workqueue kmpathd");
		goto bad_alloc_kmultipathd;
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		goto bad_alloc_kmpath_handlerd;
	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

static void __exit dm_multipath_exit(void)
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);
	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");