dm mpath: push path selector locking down to path selectors
[linux-block.git] drivers/md/dm-mpath.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static int __pgpath_busy(struct pgpath *pgpath);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->mpio_pool = NULL;
		if (!use_blk_mq) {
			unsigned min_ios = dm_get_reserved_rq_based_ios();

			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
			if (!m->mpio_pool) {
				kfree(m);
				return NULL;
			}
		}

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	if (!m->mpio_pool) {
		/* Use blk-mq pdu memory requested via per_io_data_size */
		mpio = get_mpio(info);
		memset(mpio, 0, sizeof(*mpio));
		return mpio;
	}

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return NULL;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
	/* Only needed for non blk-mq (.request_fn) multipath */
	if (m->mpio_pool) {
		struct dm_mpath_io *mpio = info->ptr;

		info->ptr = NULL;
		mempool_free(mpio, m->mpio_pool);
	}
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (m->pg_init_in_progress || m->pg_init_disabled)
		return 0;

	m->pg_init_count++;
	m->pg_init_required = 0;

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
	return m->pg_init_in_progress;
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;
	unsigned repeat_count;

	path = pg->ps.type->select_path(&pg->ps, &repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths) {
		m->queue_io = 0;
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path ||
		(m->queue_if_no_path != m->saved_queue_if_no_path &&
		 dm_noflush_suspending(m->ti)));
}

/*
 * Map cloned requests
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	spin_lock_irq(&m->lock);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath || !m->queue_io)
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (!pgpath) {
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
		goto out_unlock;
	} else if (m->queue_io || m->pg_init_required) {
		__pg_init_all_paths(m);
		goto out_unlock;
	}

	mpio = set_mpio(m, map_context);
	if (!mpio)
		/* ENOMEM, requeue */
		goto out_unlock;

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	spin_unlock_irq(&m->lock);

	if (clone) {
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/*
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		 */
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;

out_unlock:
	spin_unlock_irq(&m->lock);

	return r;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
	blk_mq_free_request(clone);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
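/*
 * Illustrative example (not part of the original source): a constructor/table
 * line for a single round-robin priority group with two paths, no feature
 * args and no hardware handler might look like
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. 0 feature args, 0 hw_handler args, 1 priority group, initial group 1,
 * selector "round-robin" with 0 selector args, 2 paths with 1 per-path
 * selector arg each (here an assumed repeat count of 1000).
 */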
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti, use_blk_mq);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (use_blk_mq)
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue)
		dm_table_run_md_queue_async(m->ti->table);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	if (m->pg_init_required) {
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
			goto out;
	}
	m->queue_io = 0;

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones. If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_request_fn_mpio(m, map_context);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
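/*
 * Illustrative example (not part of the original source): for the table line
 * sketched above the constructor section, STATUSTYPE_INFO output might look
 * like
 *
 *     2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * (two feature status args: queue_io and pg_init_count; no handler status
 * args; one group, group 1 active; each path listed as dev, A/F state and
 * fail count), while STATUSTYPE_TABLE output repeats the constructor string.
 */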
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      m->retain_attached_hw_handler);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->retain_attached_hw_handler)
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
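
/*
 * Illustrative usage (not part of the original source): the messages handled
 * below are normally sent with dmsetup, e.g.
 *
 *     dmsetup message <mapped-device> 0 fail_path 8:16
 *     dmsetup message <mapped-device> 0 queue_if_no_path
 *
 * Single-argument messages toggle queueing behaviour; two-argument messages
 * name a path device (fail_path/reinstate_path) or a PG number
 * (disable_group/enable_group/switch_group).
 */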
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	unsigned long flags;
	int r;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		if (!m->queue_io) {
			*bdev = m->current_pgpath->path.dev->bdev;
			*mode = m->current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (m->queue_if_no_path)
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	if (r == -ENOTCONN) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		}
		if (m->pg_init_required)
			__pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* pg_init in progress or no paths available */
	if (m->pg_init_in_progress ||
	    (!m->nr_valid_paths && m->queue_if_no_path)) {
		busy = 1;
		goto out;
	}
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");