drivers/md/dm-mpath.c
1da177e4
LT
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
586e80e6
MP
8#include <linux/device-mapper.h>
9
4cc96131 10#include "dm-rq.h"
76e33fe4 11#include "dm-bio-record.h"
1da177e4 12#include "dm-path-selector.h"
b15546f9 13#include "dm-uevent.h"
1da177e4 14
e5863d9a 15#include <linux/blkdev.h>
1da177e4
LT
16#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/mempool.h>
19#include <linux/module.h>
20#include <linux/pagemap.h>
21#include <linux/slab.h>
22#include <linux/time.h>
be240ff5 23#include <linux/timer.h>
1da177e4 24#include <linux/workqueue.h>
35991652 25#include <linux/delay.h>
cfae5c9b 26#include <scsi/scsi_dh.h>
60063497 27#include <linux/atomic.h>
78ce23b5 28#include <linux/blk-mq.h>
1da177e4 29
72d94861 30#define DM_MSG_PREFIX "multipath"
4e2d19e4
CS
31#define DM_PG_INIT_DELAY_MSECS 2000
32#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
be240ff5
AP
33#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
34
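/*
 * Timeout, in seconds, used by the queue_if_no_path timer below: when it is
 * non-zero and a table has no valid paths while queue_if_no_path is set,
 * queueing is switched off after this long and the queued I/O is failed
 * (see queue_if_no_path_timeout_work()).  Zero leaves queueing unbounded.
 */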
35static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
1da177e4
LT
36
37/* Path properties */
38struct pgpath {
39 struct list_head list;
40
41 struct priority_group *pg; /* Owning PG */
42 unsigned fail_count; /* Cumulative failure count */
43
c922d5f7 44 struct dm_path path;
4e2d19e4 45 struct delayed_work activate_path;
be7d31cc
MS
46
47 bool is_active:1; /* Path status */
1da177e4
LT
48};
49
50#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
51
52/*
53 * Paths are grouped into Priority Groups and numbered from 1 upwards.
54 * Each has a path selector which controls which path gets used.
55 */
56struct priority_group {
57 struct list_head list;
58
59 struct multipath *m; /* Owning multipath instance */
60 struct path_selector ps;
61
62 unsigned pg_num; /* Reference number */
1da177e4
LT
63 unsigned nr_pgpaths; /* Number of paths in PG */
64 struct list_head pgpaths;
be7d31cc
MS
65
66 bool bypassed:1; /* Temporarily bypass this PG? */
1da177e4
LT
67};
68
69/* Multipath context */
70struct multipath {
848b8aef 71 unsigned long flags; /* Multipath state flags */
4e2d19e4 72
1fbdd2b3 73 spinlock_t lock;
848b8aef 74 enum dm_queue_mode queue_mode;
4e2d19e4 75
1da177e4
LT
76 struct pgpath *current_pgpath;
77 struct priority_group *current_pg;
78 struct priority_group *next_pg; /* Switch to this PG if set */
1da177e4 79
848b8aef
MS
80 atomic_t nr_valid_paths; /* Total number of usable paths */
81 unsigned nr_priority_groups;
82 struct list_head priority_groups;
1fbdd2b3 83
848b8aef
MS
84 const char *hw_handler_name;
85 char *hw_handler_params;
86 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
c9e45581 87 unsigned pg_init_retries; /* Number of times to retry pg_init */
4e2d19e4 88 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
91e968aa
MS
89 atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
90 atomic_t pg_init_count; /* Number of times pg_init called */
91
6380f26f 92 struct mutex work_mutex;
20800cb3 93 struct work_struct trigger_event;
848b8aef 94 struct dm_target *ti;
76e33fe4
MS
95
96 struct work_struct process_queued_bios;
97 struct bio_list queued_bios;
be240ff5
AP
98
99 struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
1da177e4
LT
100};
101
102/*
76e33fe4 103 * Context information attached to each io we process.
1da177e4 104 */
028867ac 105struct dm_mpath_io {
1da177e4 106 struct pgpath *pgpath;
02ab823f 107 size_t nr_bytes;
1da177e4
LT
108};
109
110typedef int (*action_fn) (struct pgpath *pgpath);
111
bab7cfc7 112static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
c4028958 113static void trigger_event(struct work_struct *work);
89bfce76
BVA
114static void activate_or_offline_path(struct pgpath *pgpath);
115static void activate_path_work(struct work_struct *work);
76e33fe4 116static void process_queued_bios(struct work_struct *work);
be240ff5 117static void queue_if_no_path_timeout_work(struct timer_list *t);
1da177e4 118
518257b1
MS
119/*-----------------------------------------------
120 * Multipath state flags.
121 *-----------------------------------------------*/
122
123#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
124#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
125#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
126#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
127#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
128#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
129#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
1da177e4 130
374117ad
MS
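/*
 * Lockless fast-path test of an MPATHF_* flag: the spinlock is only taken
 * to re-check the bit when the unlocked read saw it set, so callers act on
 * a value that was stable under m->lock.
 */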
131static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
132{
133 bool r = test_bit(MPATHF_bit, &m->flags);
134
135 if (r) {
136 unsigned long flags;
137 spin_lock_irqsave(&m->lock, flags);
138 r = test_bit(MPATHF_bit, &m->flags);
139 spin_unlock_irqrestore(&m->lock, flags);
140 }
141
142 return r;
143}
144
1da177e4
LT
145/*-----------------------------------------------
146 * Allocation routines
147 *-----------------------------------------------*/
148
149static struct pgpath *alloc_pgpath(void)
150{
e69fae56 151 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
1da177e4 152
848b8aef
MS
153 if (!pgpath)
154 return NULL;
155
156 pgpath->is_active = true;
1da177e4
LT
157
158 return pgpath;
159}
160
028867ac 161static void free_pgpath(struct pgpath *pgpath)
1da177e4
LT
162{
163 kfree(pgpath);
164}
165
166static struct priority_group *alloc_priority_group(void)
167{
168 struct priority_group *pg;
169
e69fae56 170 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
1da177e4 171
e69fae56
MM
172 if (pg)
173 INIT_LIST_HEAD(&pg->pgpaths);
1da177e4
LT
174
175 return pg;
176}
177
178static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
179{
180 struct pgpath *pgpath, *tmp;
181
182 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
183 list_del(&pgpath->list);
184 dm_put_device(ti, pgpath->path.dev);
185 free_pgpath(pgpath);
186 }
187}
188
189static void free_priority_group(struct priority_group *pg,
190 struct dm_target *ti)
191{
192 struct path_selector *ps = &pg->ps;
193
194 if (ps->type) {
195 ps->type->destroy(ps);
196 dm_put_path_selector(ps->type);
197 }
198
199 free_pgpaths(&pg->pgpaths, ti);
200 kfree(pg);
201}
202
e83068a5 203static struct multipath *alloc_multipath(struct dm_target *ti)
1da177e4
LT
204{
205 struct multipath *m;
206
e69fae56 207 m = kzalloc(sizeof(*m), GFP_KERNEL);
1da177e4 208 if (m) {
1da177e4
LT
209 INIT_LIST_HEAD(&m->priority_groups);
210 spin_lock_init(&m->lock);
91e968aa 211 atomic_set(&m->nr_valid_paths, 0);
c4028958 212 INIT_WORK(&m->trigger_event, trigger_event);
6380f26f 213 mutex_init(&m->work_mutex);
8637a6bf 214
e83068a5 215 m->queue_mode = DM_TYPE_NONE;
76e33fe4 216
28f16c20
MM
217 m->ti = ti;
218 ti->private = m;
be240ff5
AP
219
220 timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
1da177e4
LT
221 }
222
223 return m;
224}
225
e83068a5
MS
226static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
227{
228 if (m->queue_mode == DM_TYPE_NONE) {
953923c0 229 m->queue_mode = DM_TYPE_REQUEST_BASED;
8d47e659 230 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
e83068a5 231 INIT_WORK(&m->process_queued_bios, process_queued_bios);
8d47e659
MS
232 /*
233 * bio-based doesn't support any direct scsi_dh management;
234 * it just discovers if a scsi_dh is attached.
235 */
236 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
e83068a5
MS
237 }
238
239 dm_table_set_type(ti->table, m->queue_mode);
240
c3736674
MS
241 /*
242 * Init fields that are only used when a scsi_dh is attached
243 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
244 */
245 set_bit(MPATHF_QUEUE_IO, &m->flags);
246 atomic_set(&m->pg_init_in_progress, 0);
247 atomic_set(&m->pg_init_count, 0);
248 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
249 init_waitqueue_head(&m->pg_init_wait);
250
e83068a5
MS
251 return 0;
252}
253
1da177e4
LT
254static void free_multipath(struct multipath *m)
255{
256 struct priority_group *pg, *tmp;
1da177e4
LT
257
258 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
259 list_del(&pg->list);
260 free_priority_group(pg, m->ti);
261 }
262
cfae5c9b 263 kfree(m->hw_handler_name);
2bfd2e13 264 kfree(m->hw_handler_params);
d5ffebdd 265 mutex_destroy(&m->work_mutex);
1da177e4
LT
266 kfree(m);
267}
268
2eff1924
MS
269static struct dm_mpath_io *get_mpio(union map_info *info)
270{
271 return info->ptr;
272}
273
bf661be1
MS
274static size_t multipath_per_bio_data_size(void)
275{
276 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
277}
278
76e33fe4
MS
279static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
280{
bf661be1 281 return dm_per_bio_data(bio, multipath_per_bio_data_size());
76e33fe4
MS
282}
283
d07a241d 284static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
76e33fe4 285{
bf661be1 286 /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
bf661be1 287 void *bio_details = mpio + 1;
bf661be1
MS
288 return bio_details;
289}
290
63f6e6fd 291static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
bf661be1
MS
292{
293 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
d07a241d 294 struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
76e33fe4 295
d0442f80
MS
296 mpio->nr_bytes = bio->bi_iter.bi_size;
297 mpio->pgpath = NULL;
298 *mpio_p = mpio;
76e33fe4 299
d0442f80 300 dm_bio_record(bio_details, bio);
76e33fe4
MS
301}
302
1da177e4
LT
303/*-----------------------------------------------
304 * Path selection
305 *-----------------------------------------------*/
306
3e9f1be1 307static int __pg_init_all_paths(struct multipath *m)
fb612642
KU
308{
309 struct pgpath *pgpath;
4e2d19e4 310 unsigned long pg_init_delay = 0;
fb612642 311
b194679f
BVA
312 lockdep_assert_held(&m->lock);
313
91e968aa 314 if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
3e9f1be1 315 return 0;
17f4ff45 316
91e968aa 317 atomic_inc(&m->pg_init_count);
518257b1 318 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
3e9f1be1
HR
319
320 /* Check here to reset pg_init_required */
321 if (!m->current_pg)
322 return 0;
323
518257b1 324 if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
4e2d19e4
CS
325 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
326 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
fb612642
KU
327 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
328 /* Skip failed paths */
329 if (!pgpath->is_active)
330 continue;
4e2d19e4
CS
331 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
332 pg_init_delay))
91e968aa 333 atomic_inc(&m->pg_init_in_progress);
fb612642 334 }
91e968aa 335 return atomic_read(&m->pg_init_in_progress);
fb612642
KU
336}
337
c1d7ecf7 338static int pg_init_all_paths(struct multipath *m)
1da177e4 339{
c1d7ecf7 340 int ret;
2da1610a
MS
341 unsigned long flags;
342
343 spin_lock_irqsave(&m->lock, flags);
c1d7ecf7 344 ret = __pg_init_all_paths(m);
2da1610a 345 spin_unlock_irqrestore(&m->lock, flags);
c1d7ecf7
BVA
346
347 return ret;
2da1610a
MS
348}
349
350static void __switch_pg(struct multipath *m, struct priority_group *pg)
351{
69cea0d4
MS
352 lockdep_assert_held(&m->lock);
353
2da1610a 354 m->current_pg = pg;
1da177e4
LT
355
356 /* Must we initialise the PG first, and queue I/O till it's ready? */
cfae5c9b 357 if (m->hw_handler_name) {
518257b1
MS
358 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
359 set_bit(MPATHF_QUEUE_IO, &m->flags);
1da177e4 360 } else {
518257b1
MS
361 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
362 clear_bit(MPATHF_QUEUE_IO, &m->flags);
1da177e4 363 }
c9e45581 364
91e968aa 365 atomic_set(&m->pg_init_count, 0);
1da177e4
LT
366}
367
2da1610a
MS
368static struct pgpath *choose_path_in_pg(struct multipath *m,
369 struct priority_group *pg,
370 size_t nr_bytes)
1da177e4 371{
2da1610a 372 unsigned long flags;
c922d5f7 373 struct dm_path *path;
2da1610a 374 struct pgpath *pgpath;
1da177e4 375
90a4323c 376 path = pg->ps.type->select_path(&pg->ps, nr_bytes);
1da177e4 377 if (!path)
2da1610a 378 return ERR_PTR(-ENXIO);
1da177e4 379
2da1610a 380 pgpath = path_to_pgpath(path);
1da177e4 381
506458ef 382 if (unlikely(READ_ONCE(m->current_pg) != pg)) {
2da1610a
MS
383 /* Only update current_pgpath if pg changed */
384 spin_lock_irqsave(&m->lock, flags);
385 m->current_pgpath = pgpath;
386 __switch_pg(m, pg);
387 spin_unlock_irqrestore(&m->lock, flags);
388 }
1da177e4 389
2da1610a 390 return pgpath;
1da177e4
LT
391}
392
2da1610a 393static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
1da177e4 394{
2da1610a 395 unsigned long flags;
1da177e4 396 struct priority_group *pg;
2da1610a 397 struct pgpath *pgpath;
d19a55cc 398 unsigned bypassed = 1;
1da177e4 399
91e968aa 400 if (!atomic_read(&m->nr_valid_paths)) {
69cea0d4 401 spin_lock_irqsave(&m->lock, flags);
8d47e659 402 clear_bit(MPATHF_QUEUE_IO, &m->flags);
69cea0d4 403 spin_unlock_irqrestore(&m->lock, flags);
1da177e4 404 goto failed;
1f271972 405 }
1da177e4
LT
406
407 /* Were we instructed to switch PG? */
506458ef 408 if (READ_ONCE(m->next_pg)) {
2da1610a 409 spin_lock_irqsave(&m->lock, flags);
1da177e4 410 pg = m->next_pg;
2da1610a
MS
411 if (!pg) {
412 spin_unlock_irqrestore(&m->lock, flags);
413 goto check_current_pg;
414 }
1da177e4 415 m->next_pg = NULL;
2da1610a
MS
416 spin_unlock_irqrestore(&m->lock, flags);
417 pgpath = choose_path_in_pg(m, pg, nr_bytes);
418 if (!IS_ERR_OR_NULL(pgpath))
419 return pgpath;
1da177e4
LT
420 }
421
422 /* Don't change PG until it has no remaining paths */
2da1610a 423check_current_pg:
506458ef 424 pg = READ_ONCE(m->current_pg);
2da1610a
MS
425 if (pg) {
426 pgpath = choose_path_in_pg(m, pg, nr_bytes);
427 if (!IS_ERR_OR_NULL(pgpath))
428 return pgpath;
429 }
1da177e4
LT
430
431 /*
432 * Loop through priority groups until we find a valid path.
433 * First time we skip PGs marked 'bypassed'.
f220fd4e
MC
434 * Second time we only try the ones we skipped, but set
435 * pg_init_delay_retry so we do not hammer controllers.
1da177e4
LT
436 */
437 do {
438 list_for_each_entry(pg, &m->priority_groups, list) {
d19a55cc 439 if (pg->bypassed == !!bypassed)
1da177e4 440 continue;
2da1610a
MS
441 pgpath = choose_path_in_pg(m, pg, nr_bytes);
442 if (!IS_ERR_OR_NULL(pgpath)) {
69cea0d4
MS
443 if (!bypassed) {
444 spin_lock_irqsave(&m->lock, flags);
518257b1 445 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
69cea0d4
MS
446 spin_unlock_irqrestore(&m->lock, flags);
447 }
2da1610a 448 return pgpath;
f220fd4e 449 }
1da177e4
LT
450 }
451 } while (bypassed--);
452
453failed:
2da1610a 454 spin_lock_irqsave(&m->lock, flags);
1da177e4
LT
455 m->current_pgpath = NULL;
456 m->current_pg = NULL;
2da1610a
MS
457 spin_unlock_irqrestore(&m->lock, flags);
458
459 return NULL;
1da177e4
LT
460}
461
45e15720 462/*
ac75b09f 463 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
86331f39
BVA
464 * report the function name and line number of the function from which
465 * it has been invoked.
45e15720 466 */
86331f39 467#define dm_report_EIO(m) \
18a482f5 468do { \
ac75b09f 469 DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
d4a512ed 470 dm_table_device_name((m)->ti->table), \
ac75b09f
MS
471 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
472 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
473 dm_noflush_suspending((m)->ti)); \
18a482f5 474} while (0)
45e15720 475
c1fd0abe
MS
476/*
477 * Check whether bios must be queued in the device-mapper core rather
478 * than here in the target.
c1fd0abe 479 */
a862e4e2 480static bool __must_push_back(struct multipath *m)
c1fd0abe 481{
a862e4e2 482 return dm_noflush_suspending(m->ti);
c1fd0abe
MS
483}
484
c1fd0abe
MS
485static bool must_push_back_rq(struct multipath *m)
486{
73265f3f
MS
487 unsigned long flags;
488 bool ret;
489
490 spin_lock_irqsave(&m->lock, flags);
491 ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
492 spin_unlock_irqrestore(&m->lock, flags);
493
494 return ret;
c1fd0abe
MS
495}
496
36fcffcc 497/*
76e33fe4 498 * Map cloned requests (request-based multipath)
36fcffcc 499 */
eb8db831
CH
500static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
501 union map_info *map_context,
502 struct request **__clone)
1da177e4 503{
7943bd6d 504 struct multipath *m = ti->private;
eb8db831 505 size_t nr_bytes = blk_rq_bytes(rq);
1da177e4 506 struct pgpath *pgpath;
f40c67f0 507 struct block_device *bdev;
eb8db831 508 struct dm_mpath_io *mpio = get_mpio(map_context);
7083abbb 509 struct request_queue *q;
eb8db831 510 struct request *clone;
1da177e4 511
1da177e4 512 /* Do we need to select a new pgpath? */
506458ef 513 pgpath = READ_ONCE(m->current_pgpath);
374117ad 514 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2da1610a 515 pgpath = choose_pgpath(m, nr_bytes);
1da177e4 516
9bf59a61 517 if (!pgpath) {
c1fd0abe 518 if (must_push_back_rq(m))
b88efd43 519 return DM_MAPIO_DELAY_REQUEUE;
18a482f5 520 dm_report_EIO(m); /* Failed */
f98e0eb6 521 return DM_MAPIO_KILL;
374117ad
MS
522 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
523 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
459b5401
ML
524 pg_init_all_paths(m);
525 return DM_MAPIO_DELAY_REQUEUE;
9bf59a61 526 }
6afbc01d 527
2eb6e1e3
KB
528 mpio->pgpath = pgpath;
529 mpio->nr_bytes = nr_bytes;
530
9bf59a61 531 bdev = pgpath->path.dev->bdev;
7083abbb 532 q = bdev_get_queue(bdev);
0bf6d96c 533 clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
ff005a06 534 BLK_MQ_REQ_NOWAIT);
eb8db831
CH
535 if (IS_ERR(clone)) {
536 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
848b8aef 537 if (blk_queue_dying(q)) {
7083abbb
BVA
538 atomic_inc(&m->pg_init_in_progress);
539 activate_or_offline_path(pgpath);
050af08f 540 return DM_MAPIO_DELAY_REQUEUE;
7083abbb 541 }
050af08f
ML
542
543 /*
544 * blk-mq's SCHED_RESTART can cover this requeue, so we
545 * needn't deal with it by DELAY_REQUEUE. More importantly,
546 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
547 * get the queue busy feedback (via BLK_STS_RESOURCE),
548 * otherwise I/O merging can suffer.
549 */
6a23e05c 550 return DM_MAPIO_REQUEUE;
e5863d9a 551 }
eb8db831 552 clone->bio = clone->biotail = NULL;
eb8db831
CH
553 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
554 *__clone = clone;
e5863d9a 555
9bf59a61
MS
556 if (pgpath->pg->ps.type->start_io)
557 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
558 &pgpath->path,
559 nr_bytes);
2eb6e1e3 560 return DM_MAPIO_REMAPPED;
1da177e4
LT
561}
562
5de719e3
YY
563static void multipath_release_clone(struct request *clone,
564 union map_info *map_context)
e5863d9a 565{
5de719e3
YY
566 if (unlikely(map_context)) {
567 /*
568 * non-NULL map_context means caller is still map
569 * method; must undo multipath_clone_and_map()
570 */
571 struct dm_mpath_io *mpio = get_mpio(map_context);
572 struct pgpath *pgpath = mpio->pgpath;
573
574 if (pgpath && pgpath->pg->ps.type->end_io)
575 pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
576 &pgpath->path,
087615bf
GKB
577 mpio->nr_bytes,
578 clone->io_start_time_ns);
5de719e3
YY
579 }
580
0bf6d96c 581 blk_mq_free_request(clone);
e5863d9a
MS
582}
583
76e33fe4
MS
584/*
585 * Map cloned bios (bio-based multipath)
586 */
0001ec56 587
17213ec1 588static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
f45f1186 589{
f45f1186 590 /* Queue for the daemon to resubmit */
f45f1186
MS
591 bio_list_add(&m->queued_bios, bio);
592 if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
593 queue_work(kmultipathd, &m->process_queued_bios);
17213ec1
MS
594}
595
596static void multipath_queue_bio(struct multipath *m, struct bio *bio)
597{
598 unsigned long flags;
599
600 spin_lock_irqsave(&m->lock, flags);
601 __multipath_queue_bio(m, bio);
f45f1186
MS
602 spin_unlock_irqrestore(&m->lock, flags);
603}
604
0001ec56 605static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
76e33fe4 606{
76e33fe4
MS
607 struct pgpath *pgpath;
608 unsigned long flags;
76e33fe4
MS
609
610 /* Do we need to select a new pgpath? */
506458ef 611 pgpath = READ_ONCE(m->current_pgpath);
374117ad 612 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
0001ec56 613 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
76e33fe4 614
17213ec1 615 if (!pgpath) {
76e33fe4 616 spin_lock_irqsave(&m->lock, flags);
17213ec1
MS
617 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
618 __multipath_queue_bio(m, bio);
619 pgpath = ERR_PTR(-EAGAIN);
620 }
76e33fe4 621 spin_unlock_irqrestore(&m->lock, flags);
848b8aef 622
374117ad
MS
623 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
624 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
f45f1186 625 multipath_queue_bio(m, bio);
17213ec1 626 pg_init_all_paths(m);
0001ec56 627 return ERR_PTR(-EAGAIN);
76e33fe4
MS
628 }
629
0001ec56
MS
630 return pgpath;
631}
632
0001ec56
MS
633static int __multipath_map_bio(struct multipath *m, struct bio *bio,
634 struct dm_mpath_io *mpio)
635{
dbaf971c 636 struct pgpath *pgpath = __map_bio(m, bio);
848b8aef 637
0001ec56 638 if (IS_ERR(pgpath))
76e33fe4 639 return DM_MAPIO_SUBMITTED;
76e33fe4
MS
640
641 if (!pgpath) {
a862e4e2 642 if (__must_push_back(m))
ca5beb76 643 return DM_MAPIO_REQUEUE;
18a482f5 644 dm_report_EIO(m);
846785e6 645 return DM_MAPIO_KILL;
76e33fe4
MS
646 }
647
648 mpio->pgpath = pgpath;
76e33fe4 649
4e4cbee9 650 bio->bi_status = 0;
74d46992 651 bio_set_dev(bio, pgpath->path.dev->bdev);
1eff9d32 652 bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
76e33fe4
MS
653
654 if (pgpath->pg->ps.type->start_io)
655 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
656 &pgpath->path,
d0442f80 657 mpio->nr_bytes);
76e33fe4
MS
658 return DM_MAPIO_REMAPPED;
659}
660
661static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
662{
663 struct multipath *m = ti->private;
bf661be1
MS
664 struct dm_mpath_io *mpio = NULL;
665
63f6e6fd 666 multipath_init_per_bio_data(bio, &mpio);
76e33fe4
MS
667 return __multipath_map_bio(m, bio, mpio);
668}
669
7e48c768 670static void process_queued_io_list(struct multipath *m)
76e33fe4 671{
953923c0 672 if (m->queue_mode == DM_TYPE_REQUEST_BASED)
7e48c768 673 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
8d47e659 674 else if (m->queue_mode == DM_TYPE_BIO_BASED)
76e33fe4
MS
675 queue_work(kmultipathd, &m->process_queued_bios);
676}
677
678static void process_queued_bios(struct work_struct *work)
679{
680 int r;
681 unsigned long flags;
682 struct bio *bio;
683 struct bio_list bios;
684 struct blk_plug plug;
685 struct multipath *m =
686 container_of(work, struct multipath, process_queued_bios);
687
688 bio_list_init(&bios);
689
690 spin_lock_irqsave(&m->lock, flags);
691
692 if (bio_list_empty(&m->queued_bios)) {
693 spin_unlock_irqrestore(&m->lock, flags);
694 return;
695 }
696
697 bio_list_merge(&bios, &m->queued_bios);
698 bio_list_init(&m->queued_bios);
699
700 spin_unlock_irqrestore(&m->lock, flags);
701
702 blk_start_plug(&plug);
703 while ((bio = bio_list_pop(&bios))) {
1836df08
MS
704 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
705 dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
706 r = __multipath_map_bio(m, bio, mpio);
846785e6
CH
707 switch (r) {
708 case DM_MAPIO_KILL:
4e4cbee9
CH
709 bio->bi_status = BLK_STS_IOERR;
710 bio_endio(bio);
047385b3 711 break;
846785e6 712 case DM_MAPIO_REQUEUE:
4e4cbee9 713 bio->bi_status = BLK_STS_DM_REQUEUE;
76e33fe4 714 bio_endio(bio);
846785e6
CH
715 break;
716 case DM_MAPIO_REMAPPED:
ed00aabd 717 submit_bio_noacct(bio);
846785e6 718 break;
8192a0cd 719 case DM_MAPIO_SUBMITTED:
9157c8d3
BVA
720 break;
721 default:
722 WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
846785e6 723 }
76e33fe4
MS
724 }
725 blk_finish_plug(&plug);
726}
727
1da177e4
LT
728/*
729 * If we run out of usable paths, should we queue I/O or error it?
730 */
be7d31cc 731static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
4c3f4838 732 bool save_old_value, const char *caller)
1da177e4
LT
733{
734 unsigned long flags;
553ec94c 735 bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
d4a512ed 736 const char *dm_dev_name = dm_table_device_name(m->ti->table);
4c3f4838
MS
737
738 DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
739 dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
1da177e4
LT
740
741 spin_lock_irqsave(&m->lock, flags);
553ec94c
MS
742
743 queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
744 saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
745
746 if (save_old_value) {
747 if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
748 DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
4c3f4838 749 dm_dev_name);
553ec94c
MS
750 } else
751 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
752 } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
753 /* an explicit "fail_if_no_path" message must be honored, so drop the saved state. */
754 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
755 }
c1fd0abe 756 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
553ec94c 757
4c3f4838
MS
758 DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
759 dm_dev_name, __func__,
760 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
761 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
762 dm_noflush_suspending(m->ti));
763
1da177e4
LT
764 spin_unlock_irqrestore(&m->lock, flags);
765
76e33fe4 766 if (!queue_if_no_path) {
63d832c3 767 dm_table_run_md_queue_async(m->ti->table);
7e48c768 768 process_queued_io_list(m);
76e33fe4 769 }
63d832c3 770
1da177e4
LT
771 return 0;
772}
773
be240ff5
AP
774/*
775 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
776 * process any queued I/O.
777 */
778static void queue_if_no_path_timeout_work(struct timer_list *t)
779{
780 struct multipath *m = from_timer(m, t, nopath_timer);
be240ff5 781
d4a512ed
MS
782 DMWARN("queue_if_no_path timeout on %s, failing queued IO",
783 dm_table_device_name(m->ti->table));
4c3f4838 784 queue_if_no_path(m, false, false, __func__);
be240ff5
AP
785}
786
787/*
788 * Enable the queue_if_no_path timeout if necessary.
789 * Called with m->lock held.
790 */
791static void enable_nopath_timeout(struct multipath *m)
792{
793 unsigned long queue_if_no_path_timeout =
794 READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
795
796 lockdep_assert_held(&m->lock);
797
798 if (queue_if_no_path_timeout > 0 &&
799 atomic_read(&m->nr_valid_paths) == 0 &&
800 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
801 mod_timer(&m->nopath_timer,
802 jiffies + queue_if_no_path_timeout);
803 }
804}
805
806static void disable_nopath_timeout(struct multipath *m)
807{
808 del_timer_sync(&m->nopath_timer);
809}
810
1da177e4
LT
811/*
812 * An event is triggered whenever a path is taken out of use.
813 * Includes path failure and PG bypass.
814 */
c4028958 815static void trigger_event(struct work_struct *work)
1da177e4 816{
c4028958
DH
817 struct multipath *m =
818 container_of(work, struct multipath, trigger_event);
1da177e4
LT
819
820 dm_table_event(m->ti->table);
821}
822
823/*-----------------------------------------------------------------
824 * Constructor/argument parsing:
825 * <#multipath feature args> [<arg>]*
826 * <#hw_handler args> [hw_handler [<arg>]*]
827 * <#priority groups>
828 * <initial priority group>
829 * [<selector> <#selector args> [<arg>]*
830 * <#paths> <#per-path selector args>
831 * [<path> [<arg>]* ]+ ]+
832 *---------------------------------------------------------------*/
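/*
 * Illustrative example only (the device numbers are invented): the target
 * argument string
 *   "1 queue_if_no_path 0 2 1 round-robin 0 1 1 8:16 1 round-robin 0 1 1 8:32 1"
 * asks for one feature (queue_if_no_path), no hardware handler, and two
 * priority groups of one path each, using the round-robin selector with one
 * per-path selector argument.
 */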
498f0103 833static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
1da177e4
LT
834 struct dm_target *ti)
835{
836 int r;
837 struct path_selector_type *pst;
838 unsigned ps_argc;
839
5916a22b 840 static const struct dm_arg _args[] = {
72d94861 841 {0, 1024, "invalid number of path selector args"},
1da177e4
LT
842 };
843
498f0103 844 pst = dm_get_path_selector(dm_shift_arg(as));
1da177e4 845 if (!pst) {
72d94861 846 ti->error = "unknown path selector type";
1da177e4
LT
847 return -EINVAL;
848 }
849
498f0103 850 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
371b2e34
MP
851 if (r) {
852 dm_put_path_selector(pst);
1da177e4 853 return -EINVAL;
371b2e34 854 }
1da177e4
LT
855
856 r = pst->create(&pg->ps, ps_argc, as->argv);
857 if (r) {
858 dm_put_path_selector(pst);
72d94861 859 ti->error = "path selector constructor failed";
1da177e4
LT
860 return r;
861 }
862
863 pg->ps.type = pst;
498f0103 864 dm_consume_args(as, ps_argc);
1da177e4
LT
865
866 return 0;
867}
868
e8f74a0f 869static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
b592211c 870 const char **attached_handler_name, char **error)
1da177e4 871{
848b8aef 872 struct request_queue *q = bdev_get_queue(bdev);
848b8aef 873 int r;
a58a935d 874
374117ad 875 if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
1bab0de0 876retain:
b592211c 877 if (*attached_handler_name) {
54cd640d 878 /*
879 * Clear any hw_handler_params associated with a
880 * handler that isn't already attached.
881 */
b592211c 882 if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
54cd640d 883 kfree(m->hw_handler_params);
884 m->hw_handler_params = NULL;
885 }
886
a58a935d
MS
887 /*
888 * Reset hw_handler_name to match the attached handler
a58a935d
MS
889 *
890 * NB. This modifies the table line to show the actual
891 * handler instead of the original table passed in.
892 */
893 kfree(m->hw_handler_name);
b592211c
MS
894 m->hw_handler_name = *attached_handler_name;
895 *attached_handler_name = NULL;
a58a935d
MS
896 }
897 }
a0cf7ea9 898
a58a935d 899 if (m->hw_handler_name) {
a0cf7ea9
HR
900 r = scsi_dh_attach(q, m->hw_handler_name);
901 if (r == -EBUSY) {
168678d7 902 DMINFO("retaining handler on device %pg", bdev);
1bab0de0
CH
903 goto retain;
904 }
ae11b1b3 905 if (r < 0) {
848b8aef
MS
906 *error = "error attaching hardware handler";
907 return r;
ae11b1b3 908 }
2bfd2e13
CS
909
910 if (m->hw_handler_params) {
911 r = scsi_dh_set_params(q, m->hw_handler_params);
912 if (r < 0) {
848b8aef
MS
913 *error = "unable to set hardware handler parameters";
914 return r;
2bfd2e13
CS
915 }
916 }
ae11b1b3
HR
917 }
918
848b8aef
MS
919 return 0;
920}
921
922static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
923 struct dm_target *ti)
924{
925 int r;
926 struct pgpath *p;
927 struct multipath *m = ti->private;
e8f74a0f 928 struct request_queue *q;
b592211c 929 const char *attached_handler_name = NULL;
848b8aef
MS
930
931 /* we need at least a path arg */
932 if (as->argc < 1) {
933 ti->error = "no device given";
934 return ERR_PTR(-EINVAL);
935 }
936
937 p = alloc_pgpath();
938 if (!p)
939 return ERR_PTR(-ENOMEM);
940
941 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
942 &p->path.dev);
943 if (r) {
944 ti->error = "error getting device";
945 goto bad;
946 }
947
e8f74a0f
MS
948 q = bdev_get_queue(p->path.dev->bdev);
949 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
e457edf0 950 if (attached_handler_name || m->hw_handler_name) {
848b8aef 951 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
b592211c 952 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
940bc471 953 kfree(attached_handler_name);
848b8aef
MS
954 if (r) {
955 dm_put_device(ti, p->path.dev);
956 goto bad;
957 }
958 }
959
1da177e4
LT
960 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
961 if (r) {
962 dm_put_device(ti, p->path.dev);
963 goto bad;
964 }
965
966 return p;
1da177e4
LT
967 bad:
968 free_pgpath(p);
01460f35 969 return ERR_PTR(r);
1da177e4
LT
970}
971
498f0103 972static struct priority_group *parse_priority_group(struct dm_arg_set *as,
28f16c20 973 struct multipath *m)
1da177e4 974{
5916a22b 975 static const struct dm_arg _args[] = {
72d94861
AK
976 {1, 1024, "invalid number of paths"},
977 {0, 1024, "invalid number of selector args"}
1da177e4
LT
978 };
979
980 int r;
498f0103 981 unsigned i, nr_selector_args, nr_args;
1da177e4 982 struct priority_group *pg;
28f16c20 983 struct dm_target *ti = m->ti;
1da177e4
LT
984
985 if (as->argc < 2) {
986 as->argc = 0;
01460f35
BM
987 ti->error = "not enough priority group arguments";
988 return ERR_PTR(-EINVAL);
1da177e4
LT
989 }
990
991 pg = alloc_priority_group();
992 if (!pg) {
72d94861 993 ti->error = "couldn't allocate priority group";
01460f35 994 return ERR_PTR(-ENOMEM);
1da177e4
LT
995 }
996 pg->m = m;
997
998 r = parse_path_selector(as, pg, ti);
999 if (r)
1000 goto bad;
1001
1002 /*
1003 * read the paths
1004 */
498f0103 1005 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1da177e4
LT
1006 if (r)
1007 goto bad;
1008
498f0103 1009 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1da177e4
LT
1010 if (r)
1011 goto bad;
1012
498f0103 1013 nr_args = 1 + nr_selector_args;
1da177e4
LT
1014 for (i = 0; i < pg->nr_pgpaths; i++) {
1015 struct pgpath *pgpath;
498f0103 1016 struct dm_arg_set path_args;
1da177e4 1017
498f0103 1018 if (as->argc < nr_args) {
148acff6 1019 ti->error = "not enough path parameters";
6bbf79a1 1020 r = -EINVAL;
1da177e4 1021 goto bad;
148acff6 1022 }
1da177e4 1023
498f0103 1024 path_args.argc = nr_args;
1da177e4
LT
1025 path_args.argv = as->argv;
1026
1027 pgpath = parse_path(&path_args, &pg->ps, ti);
01460f35
BM
1028 if (IS_ERR(pgpath)) {
1029 r = PTR_ERR(pgpath);
1da177e4 1030 goto bad;
01460f35 1031 }
1da177e4
LT
1032
1033 pgpath->pg = pg;
1034 list_add_tail(&pgpath->list, &pg->pgpaths);
498f0103 1035 dm_consume_args(as, nr_args);
1da177e4
LT
1036 }
1037
1038 return pg;
1039
1040 bad:
1041 free_priority_group(pg, ti);
01460f35 1042 return ERR_PTR(r);
1da177e4
LT
1043}
1044
498f0103 1045static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1da177e4 1046{
1da177e4 1047 unsigned hw_argc;
2bfd2e13 1048 int ret;
28f16c20 1049 struct dm_target *ti = m->ti;
1da177e4 1050
5916a22b 1051 static const struct dm_arg _args[] = {
72d94861 1052 {0, 1024, "invalid number of hardware handler args"},
1da177e4
LT
1053 };
1054
498f0103 1055 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1da177e4
LT
1056 return -EINVAL;
1057
1058 if (!hw_argc)
1059 return 0;
1060
8d47e659 1061 if (m->queue_mode == DM_TYPE_BIO_BASED) {
76e33fe4
MS
1062 dm_consume_args(as, hw_argc);
1063 DMERR("bio-based multipath doesn't allow hardware handler args");
1064 return 0;
1065 }
1066
498f0103 1067 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
f97dc421 1068 if (!m->hw_handler_name)
1069 return -EINVAL;
14e98c5c 1070
2bfd2e13
CS
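	/*
	 * Pack the remaining handler arguments, preceded by their count, into
	 * one buffer of nul-separated strings (sprintf() terminates each piece
	 * and p is advanced past the terminator) for scsi_dh_set_params().
	 */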
1071 if (hw_argc > 1) {
1072 char *p;
1073 int i, j, len = 4;
1074
1075 for (i = 0; i <= hw_argc - 2; i++)
1076 len += strlen(as->argv[i]) + 1;
1077 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1078 if (!p) {
1079 ti->error = "memory allocation failed";
1080 ret = -ENOMEM;
1081 goto fail;
1082 }
1083 j = sprintf(p, "%d", hw_argc - 1);
1084 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1085 j = sprintf(p, "%s", as->argv[i]);
1086 }
498f0103 1087 dm_consume_args(as, hw_argc - 1);
1da177e4
LT
1088
1089 return 0;
2bfd2e13
CS
1090fail:
1091 kfree(m->hw_handler_name);
1092 m->hw_handler_name = NULL;
1093 return ret;
1da177e4
LT
1094}
1095
498f0103 1096static int parse_features(struct dm_arg_set *as, struct multipath *m)
1da177e4
LT
1097{
1098 int r;
1099 unsigned argc;
28f16c20 1100 struct dm_target *ti = m->ti;
498f0103 1101 const char *arg_name;
1da177e4 1102
5916a22b 1103 static const struct dm_arg _args[] = {
e83068a5 1104 {0, 8, "invalid number of feature args"},
c9e45581 1105 {1, 50, "pg_init_retries must be between 1 and 50"},
4e2d19e4 1106 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1da177e4
LT
1107 };
1108
498f0103 1109 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1da177e4
LT
1110 if (r)
1111 return -EINVAL;
1112
1113 if (!argc)
1114 return 0;
1115
c9e45581 1116 do {
498f0103 1117 arg_name = dm_shift_arg(as);
c9e45581
DW
1118 argc--;
1119
498f0103 1120 if (!strcasecmp(arg_name, "queue_if_no_path")) {
4c3f4838 1121 r = queue_if_no_path(m, true, false, __func__);
c9e45581
DW
1122 continue;
1123 }
1124
a58a935d 1125 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
518257b1 1126 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
a58a935d
MS
1127 continue;
1128 }
1129
498f0103 1130 if (!strcasecmp(arg_name, "pg_init_retries") &&
c9e45581 1131 (argc >= 1)) {
498f0103 1132 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
c9e45581
DW
1133 argc--;
1134 continue;
1135 }
1136
498f0103 1137 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
4e2d19e4 1138 (argc >= 1)) {
498f0103 1139 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
4e2d19e4
CS
1140 argc--;
1141 continue;
1142 }
1143
e83068a5
MS
1144 if (!strcasecmp(arg_name, "queue_mode") &&
1145 (argc >= 1)) {
1146 const char *queue_mode_name = dm_shift_arg(as);
1147
1148 if (!strcasecmp(queue_mode_name, "bio"))
1149 m->queue_mode = DM_TYPE_BIO_BASED;
953923c0
MS
1150 else if (!strcasecmp(queue_mode_name, "rq") ||
1151 !strcasecmp(queue_mode_name, "mq"))
e83068a5 1152 m->queue_mode = DM_TYPE_REQUEST_BASED;
e83068a5
MS
1153 else {
1154 ti->error = "Unknown 'queue_mode' requested";
1155 r = -EINVAL;
1156 }
1157 argc--;
1158 continue;
1159 }
1160
1da177e4 1161 ti->error = "Unrecognised multipath feature request";
c9e45581
DW
1162 r = -EINVAL;
1163 } while (argc && !r);
1164
1165 return r;
1da177e4
LT
1166}
1167
e83068a5 1168static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1da177e4 1169{
498f0103 1170 /* target arguments */
5916a22b 1171 static const struct dm_arg _args[] = {
a490a07a
MS
1172 {0, 1024, "invalid number of priority groups"},
1173 {0, 1024, "invalid initial priority group number"},
1da177e4
LT
1174 };
1175
1176 int r;
1177 struct multipath *m;
498f0103 1178 struct dm_arg_set as;
1da177e4
LT
1179 unsigned pg_count = 0;
1180 unsigned next_pg_num;
be240ff5 1181 unsigned long flags;
1da177e4
LT
1182
1183 as.argc = argc;
1184 as.argv = argv;
1185
e83068a5 1186 m = alloc_multipath(ti);
1da177e4 1187 if (!m) {
72d94861 1188 ti->error = "can't allocate multipath";
1da177e4
LT
1189 return -EINVAL;
1190 }
1191
28f16c20 1192 r = parse_features(&as, m);
1da177e4
LT
1193 if (r)
1194 goto bad;
1195
e83068a5
MS
1196 r = alloc_multipath_stage2(ti, m);
1197 if (r)
1198 goto bad;
1199
28f16c20 1200 r = parse_hw_handler(&as, m);
1da177e4
LT
1201 if (r)
1202 goto bad;
1203
498f0103 1204 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1da177e4
LT
1205 if (r)
1206 goto bad;
1207
498f0103 1208 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1da177e4
LT
1209 if (r)
1210 goto bad;
1211
a490a07a
MS
1212 if ((!m->nr_priority_groups && next_pg_num) ||
1213 (m->nr_priority_groups && !next_pg_num)) {
1214 ti->error = "invalid initial priority group";
1215 r = -EINVAL;
1216 goto bad;
1217 }
1218
1da177e4
LT
1219 /* parse the priority groups */
1220 while (as.argc) {
1221 struct priority_group *pg;
91e968aa 1222 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1da177e4 1223
28f16c20 1224 pg = parse_priority_group(&as, m);
01460f35
BM
1225 if (IS_ERR(pg)) {
1226 r = PTR_ERR(pg);
1da177e4
LT
1227 goto bad;
1228 }
1229
91e968aa
MS
1230 nr_valid_paths += pg->nr_pgpaths;
1231 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1232
1da177e4
LT
1233 list_add_tail(&pg->list, &m->priority_groups);
1234 pg_count++;
1235 pg->pg_num = pg_count;
1236 if (!--next_pg_num)
1237 m->next_pg = pg;
1238 }
1239
1240 if (pg_count != m->nr_priority_groups) {
72d94861 1241 ti->error = "priority group count mismatch";
1da177e4
LT
1242 r = -EINVAL;
1243 goto bad;
1244 }
1245
be240ff5
AP
1246 spin_lock_irqsave(&m->lock, flags);
1247 enable_nopath_timeout(m);
1248 spin_unlock_irqrestore(&m->lock, flags);
1249
55a62eef
AK
1250 ti->num_flush_bios = 1;
1251 ti->num_discard_bios = 1;
ac62d620 1252 ti->num_write_zeroes_bios = 1;
8d47e659 1253 if (m->queue_mode == DM_TYPE_BIO_BASED)
bf661be1 1254 ti->per_io_data_size = multipath_per_bio_data_size();
eb8db831 1255 else
8637a6bf 1256 ti->per_io_data_size = sizeof(struct dm_mpath_io);
8627921f 1257
1da177e4
LT
1258 return 0;
1259
1260 bad:
1261 free_multipath(m);
1262 return r;
1263}
1264
2bded7bd
KU
1265static void multipath_wait_for_pg_init_completion(struct multipath *m)
1266{
9f4c3f87 1267 DEFINE_WAIT(wait);
2bded7bd
KU
1268
1269 while (1) {
9f4c3f87 1270 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
2bded7bd 1271
91e968aa 1272 if (!atomic_read(&m->pg_init_in_progress))
2bded7bd 1273 break;
2bded7bd
KU
1274
1275 io_schedule();
1276 }
9f4c3f87 1277 finish_wait(&m->pg_init_wait, &wait);
2bded7bd
KU
1278}
1279
1280static void flush_multipath_work(struct multipath *m)
1da177e4 1281{
848b8aef 1282 if (m->hw_handler_name) {
c322ee93
MS
1283 unsigned long flags;
1284
1285 if (!atomic_read(&m->pg_init_in_progress))
1286 goto skip;
1287
1288 spin_lock_irqsave(&m->lock, flags);
1289 if (atomic_read(&m->pg_init_in_progress) &&
1290 !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1291 spin_unlock_irqrestore(&m->lock, flags);
848b8aef 1292
935fcc56 1293 flush_workqueue(kmpath_handlerd);
c322ee93 1294 multipath_wait_for_pg_init_completion(m);
848b8aef 1295
c322ee93
MS
1296 spin_lock_irqsave(&m->lock, flags);
1297 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1298 }
1299 spin_unlock_irqrestore(&m->lock, flags);
848b8aef 1300 }
c322ee93 1301skip:
935fcc56 1302 if (m->queue_mode == DM_TYPE_BIO_BASED)
1303 flush_work(&m->process_queued_bios);
43829731 1304 flush_work(&m->trigger_event);
6df400ab
KU
1305}
1306
1307static void multipath_dtr(struct dm_target *ti)
1308{
1309 struct multipath *m = ti->private;
1310
be240ff5 1311 disable_nopath_timeout(m);
2bded7bd 1312 flush_multipath_work(m);
1da177e4
LT
1313 free_multipath(m);
1314}
1315
1da177e4
LT
1316/*
1317 * Take a path out of use.
1318 */
1319static int fail_path(struct pgpath *pgpath)
1320{
1321 unsigned long flags;
1322 struct multipath *m = pgpath->pg->m;
1323
1324 spin_lock_irqsave(&m->lock, flags);
1325
6680073d 1326 if (!pgpath->is_active)
1da177e4
LT
1327 goto out;
1328
04867370 1329 DMWARN("%s: Failing path %s.",
d4a512ed 1330 dm_table_device_name(m->ti->table),
04867370 1331 pgpath->path.dev->name);
1da177e4
LT
1332
1333 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
be7d31cc 1334 pgpath->is_active = false;
1da177e4
LT
1335 pgpath->fail_count++;
1336
91e968aa 1337 atomic_dec(&m->nr_valid_paths);
1da177e4
LT
1338
1339 if (pgpath == m->current_pgpath)
1340 m->current_pgpath = NULL;
1341
b15546f9 1342 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
91e968aa 1343 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
b15546f9 1344
fe9cf30e 1345 schedule_work(&m->trigger_event);
1da177e4 1346
be240ff5
AP
1347 enable_nopath_timeout(m);
1348
1da177e4
LT
1349out:
1350 spin_unlock_irqrestore(&m->lock, flags);
1351
1352 return 0;
1353}
1354
1355/*
1356 * Reinstate a previously-failed path
1357 */
1358static int reinstate_path(struct pgpath *pgpath)
1359{
63d832c3 1360 int r = 0, run_queue = 0;
1da177e4
LT
1361 unsigned long flags;
1362 struct multipath *m = pgpath->pg->m;
91e968aa 1363 unsigned nr_valid_paths;
1da177e4
LT
1364
1365 spin_lock_irqsave(&m->lock, flags);
1366
6680073d 1367 if (pgpath->is_active)
1da177e4
LT
1368 goto out;
1369
04867370 1370 DMWARN("%s: Reinstating path %s.",
d4a512ed 1371 dm_table_device_name(m->ti->table),
04867370 1372 pgpath->path.dev->name);
1da177e4
LT
1373
1374 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1375 if (r)
1376 goto out;
1377
be7d31cc 1378 pgpath->is_active = true;
1da177e4 1379
91e968aa
MS
1380 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1381 if (nr_valid_paths == 1) {
e54f77dd 1382 m->current_pgpath = NULL;
63d832c3 1383 run_queue = 1;
e54f77dd 1384 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
4e2d19e4 1385 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
91e968aa 1386 atomic_inc(&m->pg_init_in_progress);
e54f77dd 1387 }
1da177e4 1388
b15546f9 1389 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
91e968aa 1390 pgpath->path.dev->name, nr_valid_paths);
b15546f9 1391
fe9cf30e 1392 schedule_work(&m->trigger_event);
1da177e4
LT
1393
1394out:
1395 spin_unlock_irqrestore(&m->lock, flags);
76e33fe4 1396 if (run_queue) {
63d832c3 1397 dm_table_run_md_queue_async(m->ti->table);
7e48c768 1398 process_queued_io_list(m);
76e33fe4 1399 }
1da177e4 1400
be240ff5
AP
1401 if (pgpath->is_active)
1402 disable_nopath_timeout(m);
1403
1da177e4
LT
1404 return r;
1405}
1406
1407/*
1408 * Fail or reinstate all paths that match the provided struct dm_dev.
1409 */
1410static int action_dev(struct multipath *m, struct dm_dev *dev,
1411 action_fn action)
1412{
19040c0b 1413 int r = -EINVAL;
1da177e4
LT
1414 struct pgpath *pgpath;
1415 struct priority_group *pg;
1416
1417 list_for_each_entry(pg, &m->priority_groups, list) {
1418 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1419 if (pgpath->path.dev == dev)
1420 r = action(pgpath);
1421 }
1422 }
1423
1424 return r;
1425}
1426
1427/*
1428 * Temporarily try to avoid having to use the specified PG
1429 */
1430static void bypass_pg(struct multipath *m, struct priority_group *pg,
be7d31cc 1431 bool bypassed)
1da177e4
LT
1432{
1433 unsigned long flags;
1434
1435 spin_lock_irqsave(&m->lock, flags);
1436
1437 pg->bypassed = bypassed;
1438 m->current_pgpath = NULL;
1439 m->current_pg = NULL;
1440
1441 spin_unlock_irqrestore(&m->lock, flags);
1442
fe9cf30e 1443 schedule_work(&m->trigger_event);
1da177e4
LT
1444}
1445
1446/*
1447 * Switch to using the specified PG from the next I/O that gets mapped
1448 */
1449static int switch_pg_num(struct multipath *m, const char *pgstr)
1450{
1451 struct priority_group *pg;
1452 unsigned pgnum;
1453 unsigned long flags;
31998ef1 1454 char dummy;
1da177e4 1455
31998ef1 1456 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
cc5bd925 1457 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1da177e4
LT
1458 DMWARN("invalid PG number supplied to switch_pg_num");
1459 return -EINVAL;
1460 }
1461
1462 spin_lock_irqsave(&m->lock, flags);
1463 list_for_each_entry(pg, &m->priority_groups, list) {
be7d31cc 1464 pg->bypassed = false;
1da177e4
LT
1465 if (--pgnum)
1466 continue;
1467
1468 m->current_pgpath = NULL;
1469 m->current_pg = NULL;
1470 m->next_pg = pg;
1471 }
1472 spin_unlock_irqrestore(&m->lock, flags);
1473
fe9cf30e 1474 schedule_work(&m->trigger_event);
1da177e4
LT
1475 return 0;
1476}
1477
1478/*
1479 * Set/clear bypassed status of a PG.
1480 * PGs are numbered upwards from 1 in the order they were declared.
1481 */
be7d31cc 1482static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1da177e4
LT
1483{
1484 struct priority_group *pg;
1485 unsigned pgnum;
31998ef1 1486 char dummy;
1da177e4 1487
31998ef1 1488 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
cc5bd925 1489 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1da177e4
LT
1490 DMWARN("invalid PG number supplied to bypass_pg");
1491 return -EINVAL;
1492 }
1493
1494 list_for_each_entry(pg, &m->priority_groups, list) {
1495 if (!--pgnum)
1496 break;
1497 }
1498
1499 bypass_pg(m, pg, bypassed);
1500 return 0;
1501}
1502
c9e45581
DW
1503/*
1504 * Should we retry pg_init immediately?
1505 */
be7d31cc 1506static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
c9e45581
DW
1507{
1508 unsigned long flags;
be7d31cc 1509 bool limit_reached = false;
c9e45581
DW
1510
1511 spin_lock_irqsave(&m->lock, flags);
1512
91e968aa
MS
1513 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1514 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
518257b1 1515 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
c9e45581 1516 else
be7d31cc 1517 limit_reached = true;
c9e45581
DW
1518
1519 spin_unlock_irqrestore(&m->lock, flags);
1520
1521 return limit_reached;
1522}
1523
3ae31f6a 1524static void pg_init_done(void *data, int errors)
cfae5c9b 1525{
83c0d5d5 1526 struct pgpath *pgpath = data;
cfae5c9b
CS
1527 struct priority_group *pg = pgpath->pg;
1528 struct multipath *m = pg->m;
1529 unsigned long flags;
be7d31cc 1530 bool delay_retry = false;
cfae5c9b
CS
1531
1532 /* device or driver problems */
1533 switch (errors) {
1534 case SCSI_DH_OK:
1535 break;
1536 case SCSI_DH_NOSYS:
1537 if (!m->hw_handler_name) {
1538 errors = 0;
1539 break;
1540 }
f7b934c8
MB
1541 DMERR("Could not failover the device: Handler scsi_dh_%s "
1542 "Error %d.", m->hw_handler_name, errors);
cfae5c9b
CS
1543 /*
1544 * Fail path for now, so we do not ping pong
1545 */
1546 fail_path(pgpath);
1547 break;
1548 case SCSI_DH_DEV_TEMP_BUSY:
1549 /*
1550 * Probably doing something like FW upgrade on the
1551 * controller so try the other pg.
1552 */
be7d31cc 1553 bypass_pg(m, pg, true);
cfae5c9b 1554 break;
cfae5c9b 1555 case SCSI_DH_RETRY:
4e2d19e4 1556 /* Wait before retrying. */
4ecc5081 1557 delay_retry = true;
df561f66 1558 fallthrough;
cfae5c9b
CS
1559 case SCSI_DH_IMM_RETRY:
1560 case SCSI_DH_RES_TEMP_UNAVAIL:
1561 if (pg_init_limit_reached(m, pgpath))
1562 fail_path(pgpath);
1563 errors = 0;
1564 break;
ec31f3f7 1565 case SCSI_DH_DEV_OFFLINED:
cfae5c9b
CS
1566 default:
1567 /*
1568 * We probably do not want to fail the path for a device
1569 * error, but this is what the old dm did. In future
1570 * patches we can do more advanced handling.
1571 */
1572 fail_path(pgpath);
1573 }
1574
1575 spin_lock_irqsave(&m->lock, flags);
1576 if (errors) {
e54f77dd
CS
1577 if (pgpath == m->current_pgpath) {
1578 DMERR("Could not failover device. Error %d.", errors);
1579 m->current_pgpath = NULL;
1580 m->current_pg = NULL;
1581 }
518257b1 1582 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
be7d31cc 1583 pg->bypassed = false;
cfae5c9b 1584
91e968aa 1585 if (atomic_dec_return(&m->pg_init_in_progress) > 0)
d0259bf0
KU
1586 /* Activations of other paths are still ongoing */
1587 goto out;
1588
518257b1
MS
1589 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1590 if (delay_retry)
1591 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1592 else
1593 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1594
3e9f1be1
HR
1595 if (__pg_init_all_paths(m))
1596 goto out;
1597 }
518257b1 1598 clear_bit(MPATHF_QUEUE_IO, &m->flags);
d0259bf0 1599
7e48c768 1600 process_queued_io_list(m);
76e33fe4 1601
2bded7bd
KU
1602 /*
1603 * Wake up any thread waiting to suspend.
1604 */
1605 wake_up(&m->pg_init_wait);
1606
d0259bf0 1607out:
cfae5c9b
CS
1608 spin_unlock_irqrestore(&m->lock, flags);
1609}
1610
89bfce76 1611static void activate_or_offline_path(struct pgpath *pgpath)
bab7cfc7 1612{
f10e06b7 1613 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
bab7cfc7 1614
f10e06b7
MS
1615 if (pgpath->is_active && !blk_queue_dying(q))
1616 scsi_dh_activate(q, pg_init_done, pgpath);
3a017509
HR
1617 else
1618 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
bab7cfc7
CS
1619}
1620
89bfce76
BVA
1621static void activate_path_work(struct work_struct *work)
1622{
1623 struct pgpath *pgpath =
1624 container_of(work, struct pgpath, activate_path.work);
1625
1626 activate_or_offline_path(pgpath);
1627}
1628
b79f10ee 1629static int multipath_end_io(struct dm_target *ti, struct request *clone,
2a842aca 1630 blk_status_t error, union map_info *map_context)
1da177e4 1631{
b79f10ee
CH
1632 struct dm_mpath_io *mpio = get_mpio(map_context);
1633 struct pgpath *pgpath = mpio->pgpath;
7ed8578a 1634 int r = DM_ENDIO_DONE;
b79f10ee 1635
f40c67f0
KU
1636 /*
1637 * We don't queue any clone request inside the multipath target
1638 * during end I/O handling, since those clone requests don't have
1639 * bio clones. If we queue them inside the multipath target,
1640 * we would need to make bio clones, which requires memory allocation.
4cc96131 1641 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
f40c67f0
KU
1642 * don't have bio clones.)
1643 * Instead of queueing the clone request here, we queue the original
1644 * request into dm core, which will remake a clone request and
1645 * clone bios for it and resubmit it later.
1646 */
a1275677 1647 if (error && blk_path_error(error)) {
b79f10ee 1648 struct multipath *m = ti->private;
1da177e4 1649
ac514ffc
MS
1650 if (error == BLK_STS_RESOURCE)
1651 r = DM_ENDIO_DELAY_REQUEUE;
1652 else
1653 r = DM_ENDIO_REQUEUE;
1da177e4 1654
b79f10ee
CH
1655 if (pgpath)
1656 fail_path(pgpath);
1da177e4 1657
73265f3f 1658 if (!atomic_read(&m->nr_valid_paths) &&
c1fd0abe 1659 !must_push_back_rq(m)) {
2a842aca 1660 if (error == BLK_STS_IOERR)
18a482f5 1661 dm_report_EIO(m);
7ed8578a
CH
1662 /* complete with the original error */
1663 r = DM_ENDIO_DONE;
1664 }
b79f10ee 1665 }
466891f9 1666
1da177e4 1667 if (pgpath) {
b79f10ee
CH
1668 struct path_selector *ps = &pgpath->pg->ps;
1669
1da177e4 1670 if (ps->type->end_io)
087615bf
GKB
1671 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1672 clone->io_start_time_ns);
1da177e4 1673 }
1da177e4 1674
7ed8578a 1675 return r;
1da177e4
LT
1676}
1677
4e4cbee9 1678static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
848b8aef 1679 blk_status_t *error)
76e33fe4 1680{
14ef1e48
CH
1681 struct multipath *m = ti->private;
1682 struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1683 struct pgpath *pgpath = mpio->pgpath;
76e33fe4 1684 unsigned long flags;
1be56909 1685 int r = DM_ENDIO_DONE;
76e33fe4 1686
a1275677 1687 if (!*error || !blk_path_error(*error))
14ef1e48 1688 goto done;
76e33fe4 1689
14ef1e48
CH
1690 if (pgpath)
1691 fail_path(pgpath);
76e33fe4 1692
a271a89c
MS
1693 if (!atomic_read(&m->nr_valid_paths)) {
1694 spin_lock_irqsave(&m->lock, flags);
1695 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1696 if (__must_push_back(m)) {
1697 r = DM_ENDIO_REQUEUE;
1698 } else {
1699 dm_report_EIO(m);
1700 *error = BLK_STS_IOERR;
1701 }
1702 spin_unlock_irqrestore(&m->lock, flags);
1703 goto done;
c1fd0abe 1704 }
a271a89c 1705 spin_unlock_irqrestore(&m->lock, flags);
18a482f5 1706 }
76e33fe4 1707
f45f1186 1708 multipath_queue_bio(m, clone);
1be56909 1709 r = DM_ENDIO_INCOMPLETE;
14ef1e48 1710done:
76e33fe4 1711 if (pgpath) {
14ef1e48
CH
1712 struct path_selector *ps = &pgpath->pg->ps;
1713
76e33fe4 1714 if (ps->type->end_io)
087615bf
GKB
1715 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1716 dm_start_time_ns_from_clone(clone));
76e33fe4
MS
1717 }
1718
1be56909 1719 return r;
76e33fe4
MS
1720}
1721
1da177e4 1722/*
553ec94c
MS
1723 * Suspend with flush can't complete until all the I/O is processed
1724 * so if the last path fails we must error any remaining I/O.
1725 * - Note that if the freeze_bdev fails while suspending, the
1726 * queue_if_no_path state is lost - userspace should reset it.
1727 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1da177e4
LT
1728 */
1729static void multipath_presuspend(struct dm_target *ti)
1730{
7943bd6d 1731 struct multipath *m = ti->private;
1da177e4 1732
553ec94c
MS
1733 /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1734 if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
4c3f4838 1735 queue_if_no_path(m, false, true, __func__);
1da177e4
LT
1736}
1737
6df400ab
KU
1738static void multipath_postsuspend(struct dm_target *ti)
1739{
6380f26f
MA
1740 struct multipath *m = ti->private;
1741
1742 mutex_lock(&m->work_mutex);
2bded7bd 1743 flush_multipath_work(m);
6380f26f 1744 mutex_unlock(&m->work_mutex);
6df400ab
KU
1745}
1746
436d4108
AK
1747/*
1748 * Restore the queue_if_no_path setting.
1749 */
1da177e4
LT
1750static void multipath_resume(struct dm_target *ti)
1751{
7943bd6d 1752 struct multipath *m = ti->private;
1814f2e3 1753 unsigned long flags;
1da177e4 1754
1814f2e3 1755 spin_lock_irqsave(&m->lock, flags);
553ec94c
MS
1756 if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1757 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1758 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1759 }
4c3f4838
MS
1760
1761 DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
d4a512ed 1762 dm_table_device_name(m->ti->table), __func__,
4c3f4838
MS
1763 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1764 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1765
1814f2e3 1766 spin_unlock_irqrestore(&m->lock, flags);
1da177e4
LT
1767}
1768
1769/*
1770 * Info output has the following format:
1771 * num_multipath_feature_args [multipath_feature_args]*
1772 * num_handler_status_args [handler_status_args]*
1773 * num_groups init_group_number
1774 * [A|D|E num_ps_status_args [ps_status_args]*
1775 * num_paths num_selector_args
1776 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1777 *
1778 * Table output has the following format (identical to the constructor string):
1779 * num_feature_args [features_args]*
1780 * num_handler_args hw_handler [hw_handler_args]*
1781 * num_groups init_group_number
1782 * [priority selector-name num_ps_args [ps_args]*
1783 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1784 */
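
To make the two formats above concrete, here is roughly what this function would emit for a map with no feature arguments, no hardware handler, and two round-robin priority groups of two paths each. The device numbers, fail counts and repeat counts are invented for illustration, the round-robin argument counts are assumed from its path selector registration, and dmsetup itself prepends the start sector, length and target name to these strings:

 STATUSTYPE_TABLE: 0 0 2 1 round-robin 0 2 1 8:16 1 8:32 1 round-robin 0 2 1 8:48 1 8:64 1
 STATUSTYPE_INFO:  2 0 0 0 2 1 A 0 2 0 8:16 A 0 8:32 A 0 E 0 2 0 8:48 A 0 8:64 A 0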
fd7c092e
MP
1785static void multipath_status(struct dm_target *ti, status_type_t type,
1786 unsigned status_flags, char *result, unsigned maxlen)
1da177e4 1787{
33ace4ca 1788 int sz = 0, pg_counter, pgpath_counter;
1da177e4 1789 unsigned long flags;
7943bd6d 1790 struct multipath *m = ti->private;
1da177e4
LT
1791 struct priority_group *pg;
1792 struct pgpath *p;
1793 unsigned pg_num;
1794 char state;
1795
1796 spin_lock_irqsave(&m->lock, flags);
1797
1798 /* Features */
1799 if (type == STATUSTYPE_INFO)
91e968aa
MS
1800 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1801 atomic_read(&m->pg_init_count));
c9e45581 1802 else {
518257b1 1803 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
4e2d19e4 1804 (m->pg_init_retries > 0) * 2 +
a58a935d 1805 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
e83068a5
MS
1806 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1807 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1808
518257b1 1809 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
c9e45581
DW
1810 DMEMIT("queue_if_no_path ");
1811 if (m->pg_init_retries)
1812 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
4e2d19e4
CS
1813 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1814 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
518257b1 1815 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
a58a935d 1816 DMEMIT("retain_attached_hw_handler ");
e83068a5
MS
1817 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
 1818 switch (m->queue_mode) {
1819 case DM_TYPE_BIO_BASED:
1820 DMEMIT("queue_mode bio ");
1821 break;
7e0d574f
BVA
1822 default:
1823 WARN_ON_ONCE(true);
1824 break;
e83068a5
MS
1825 }
1826 }
c9e45581 1827 }
1da177e4 1828
cfae5c9b 1829 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1da177e4
LT
1830 DMEMIT("0 ");
1831 else
cfae5c9b 1832 DMEMIT("1 %s ", m->hw_handler_name);
1da177e4
LT
1833
1834 DMEMIT("%u ", m->nr_priority_groups);
1835
1836 if (m->next_pg)
1837 pg_num = m->next_pg->pg_num;
1838 else if (m->current_pg)
1839 pg_num = m->current_pg->pg_num;
1840 else
a490a07a 1841 pg_num = (m->nr_priority_groups ? 1 : 0);
1da177e4
LT
1842
1843 DMEMIT("%u ", pg_num);
1844
1845 switch (type) {
1846 case STATUSTYPE_INFO:
1847 list_for_each_entry(pg, &m->priority_groups, list) {
1848 if (pg->bypassed)
1849 state = 'D'; /* Disabled */
1850 else if (pg == m->current_pg)
1851 state = 'A'; /* Currently Active */
1852 else
1853 state = 'E'; /* Enabled */
1854
1855 DMEMIT("%c ", state);
1856
1857 if (pg->ps.type->status)
1858 sz += pg->ps.type->status(&pg->ps, NULL, type,
1859 result + sz,
1860 maxlen - sz);
1861 else
1862 DMEMIT("0 ");
1863
1864 DMEMIT("%u %u ", pg->nr_pgpaths,
1865 pg->ps.type->info_args);
1866
1867 list_for_each_entry(p, &pg->pgpaths, list) {
1868 DMEMIT("%s %s %u ", p->path.dev->name,
6680073d 1869 p->is_active ? "A" : "F",
1da177e4
LT
1870 p->fail_count);
1871 if (pg->ps.type->status)
1872 sz += pg->ps.type->status(&pg->ps,
1873 &p->path, type, result + sz,
1874 maxlen - sz);
1875 }
1876 }
1877 break;
1878
1879 case STATUSTYPE_TABLE:
1880 list_for_each_entry(pg, &m->priority_groups, list) {
1881 DMEMIT("%s ", pg->ps.type->name);
1882
1883 if (pg->ps.type->status)
1884 sz += pg->ps.type->status(&pg->ps, NULL, type,
1885 result + sz,
1886 maxlen - sz);
1887 else
1888 DMEMIT("0 ");
1889
1890 DMEMIT("%u %u ", pg->nr_pgpaths,
1891 pg->ps.type->table_args);
1892
1893 list_for_each_entry(p, &pg->pgpaths, list) {
1894 DMEMIT("%s ", p->path.dev->name);
1895 if (pg->ps.type->status)
1896 sz += pg->ps.type->status(&pg->ps,
1897 &p->path, type, result + sz,
1898 maxlen - sz);
1899 }
1900 }
1901 break;
8ec45662
TS
1902
1903 case STATUSTYPE_IMA:
33ace4ca
TS
 1904 sz = 0; /* reset the offset into the result buffer */
1905
8ec45662 1906 DMEMIT_TARGET_NAME_VERSION(ti->type);
33ace4ca
TS
1907 DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1908
1909 pg_counter = 0;
8ec45662
TS
1910 list_for_each_entry(pg, &m->priority_groups, list) {
1911 if (pg->bypassed)
1912 state = 'D'; /* Disabled */
1913 else if (pg == m->current_pg)
1914 state = 'A'; /* Currently Active */
1915 else
1916 state = 'E'; /* Enabled */
33ace4ca
TS
1917 DMEMIT(",pg_state_%d=%c", pg_counter, state);
1918 DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1919 DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
8ec45662 1920
33ace4ca 1921 pgpath_counter = 0;
8ec45662 1922 list_for_each_entry(p, &pg->pgpaths, list) {
33ace4ca
TS
1923 DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1924 pg_counter, pgpath_counter, p->path.dev->name,
1925 pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1926 pg_counter, pgpath_counter, p->fail_count);
8ec45662 1927 if (pg->ps.type->status) {
33ace4ca
TS
1928 DMEMIT(",path_selector_status_%d_%d=",
1929 pg_counter, pgpath_counter);
8ec45662
TS
1930 sz += pg->ps.type->status(&pg->ps, &p->path,
1931 type, result + sz,
1932 maxlen - sz);
1933 }
33ace4ca 1934 pgpath_counter++;
8ec45662 1935 }
33ace4ca 1936 pg_counter++;
8ec45662
TS
1937 }
1938 DMEMIT(";");
1939 break;
1da177e4
LT
1940 }
1941
1942 spin_unlock_irqrestore(&m->lock, flags);
1da177e4
LT
1943}
1944
1eb5fa84
MS
1945static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1946 char *result, unsigned maxlen)
1da177e4 1947{
6380f26f 1948 int r = -EINVAL;
1da177e4 1949 struct dm_dev *dev;
7943bd6d 1950 struct multipath *m = ti->private;
1da177e4 1951 action_fn action;
be240ff5 1952 unsigned long flags;
1da177e4 1953
6380f26f
MA
1954 mutex_lock(&m->work_mutex);
1955
c2f3d24b
KU
1956 if (dm_suspended(ti)) {
1957 r = -EBUSY;
1958 goto out;
1959 }
1960
1da177e4 1961 if (argc == 1) {
498f0103 1962 if (!strcasecmp(argv[0], "queue_if_no_path")) {
4c3f4838 1963 r = queue_if_no_path(m, true, false, __func__);
be240ff5
AP
1964 spin_lock_irqsave(&m->lock, flags);
1965 enable_nopath_timeout(m);
1966 spin_unlock_irqrestore(&m->lock, flags);
6380f26f 1967 goto out;
498f0103 1968 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
4c3f4838 1969 r = queue_if_no_path(m, false, false, __func__);
be240ff5 1970 disable_nopath_timeout(m);
6380f26f
MA
1971 goto out;
1972 }
1da177e4
LT
1973 }
1974
6380f26f 1975 if (argc != 2) {
a356e426 1976 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
6380f26f
MA
1977 goto out;
1978 }
1da177e4 1979
498f0103 1980 if (!strcasecmp(argv[0], "disable_group")) {
be7d31cc 1981 r = bypass_pg_num(m, argv[1], true);
6380f26f 1982 goto out;
498f0103 1983 } else if (!strcasecmp(argv[0], "enable_group")) {
be7d31cc 1984 r = bypass_pg_num(m, argv[1], false);
6380f26f 1985 goto out;
498f0103 1986 } else if (!strcasecmp(argv[0], "switch_group")) {
6380f26f
MA
1987 r = switch_pg_num(m, argv[1]);
1988 goto out;
498f0103 1989 } else if (!strcasecmp(argv[0], "reinstate_path"))
1da177e4 1990 action = reinstate_path;
498f0103 1991 else if (!strcasecmp(argv[0], "fail_path"))
1da177e4 1992 action = fail_path;
6380f26f 1993 else {
a356e426 1994 DMWARN("Unrecognised multipath message received: %s", argv[0]);
6380f26f
MA
1995 goto out;
1996 }
1da177e4 1997
8215d6ec 1998 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1da177e4 1999 if (r) {
72d94861 2000 DMWARN("message: error getting device %s",
1da177e4 2001 argv[1]);
6380f26f 2002 goto out;
1da177e4
LT
2003 }
2004
2005 r = action_dev(m, dev, action);
2006
2007 dm_put_device(ti, dev);
2008
6380f26f
MA
2009out:
2010 mutex_unlock(&m->work_mutex);
1da177e4 2011 return r;
1da177e4
LT
2012}
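
For context on how these messages arrive from userspace, below is a minimal sketch using libdevmapper's target-message interface; the map name "mpatha", the path "8:32" in the usage comment, and the omission of detailed error reporting are illustrative assumptions, not part of this driver:

#include <libdevmapper.h>

/* Sketch: send one target message (e.g. "fail_path 8:32") to a multipath map. */
static int mpath_send_message(const char *map_name, const char *msg)
{
	struct dm_task *dmt;
	int r = 0;

	dmt = dm_task_create(DM_DEVICE_TARGET_MSG);
	if (!dmt)
		return 0;

	/* The multipath target is a singleton, so sector 0 addresses it. */
	if (dm_task_set_name(dmt, map_name) &&
	    dm_task_set_sector(dmt, 0) &&
	    dm_task_set_message(dmt, msg))
		r = dm_task_run(dmt);

	dm_task_destroy(dmt);
	return r;	/* 1 on success, 0 on failure, following libdevmapper convention */
}

/* Usage (hypothetical):
 *   mpath_send_message("mpatha", "queue_if_no_path");
 *   mpath_send_message("mpatha", "fail_path 8:32");
 */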
2013
e56f81e0 2014static int multipath_prepare_ioctl(struct dm_target *ti,
5bd5e8d8 2015 struct block_device **bdev)
9af4aa30 2016{
35991652 2017 struct multipath *m = ti->private;
564dbb13 2018 struct pgpath *pgpath;
69cea0d4 2019 unsigned long flags;
35991652
MP
2020 int r;
2021
564dbb13 2022 pgpath = READ_ONCE(m->current_pgpath);
374117ad 2023 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
564dbb13 2024 pgpath = choose_pgpath(m, 0);
9af4aa30 2025
564dbb13 2026 if (pgpath) {
374117ad 2027 if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
564dbb13 2028 *bdev = pgpath->path.dev->bdev;
43e43c9e
JN
2029 r = 0;
2030 } else {
2031 /* pg_init has not started or completed */
2032 r = -ENOTCONN;
2033 }
2034 } else {
2035 /* No path is available */
a271a89c
MS
2036 r = -EIO;
2037 spin_lock_irqsave(&m->lock, flags);
518257b1 2038 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
43e43c9e 2039 r = -ENOTCONN;
a271a89c 2040 spin_unlock_irqrestore(&m->lock, flags);
e90dae1f 2041 }
9af4aa30 2042
5bbbfdf6 2043 if (r == -ENOTCONN) {
506458ef 2044 if (!READ_ONCE(m->current_pg)) {
3e9f1be1 2045 /* Path status changed, redo selection */
2da1610a 2046 (void) choose_pgpath(m, 0);
3e9f1be1 2047 }
69cea0d4 2048 spin_lock_irqsave(&m->lock, flags);
518257b1 2049 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
69cea0d4
MS
2050 (void) __pg_init_all_paths(m);
2051 spin_unlock_irqrestore(&m->lock, flags);
63d832c3 2052 dm_table_run_md_queue_async(m->ti->table);
7e48c768 2053 process_queued_io_list(m);
3e9f1be1 2054 }
35991652 2055
e56f81e0
CH
2056 /*
2057 * Only pass ioctls through if the device sizes match exactly.
2058 */
6dcbb52c 2059 if (!r && ti->len != bdev_nr_sectors((*bdev)))
e56f81e0
CH
2060 return 1;
2061 return r;
9af4aa30
MB
2062}
2063
af4874e0
MS
2064static int multipath_iterate_devices(struct dm_target *ti,
2065 iterate_devices_callout_fn fn, void *data)
2066{
2067 struct multipath *m = ti->private;
2068 struct priority_group *pg;
2069 struct pgpath *p;
2070 int ret = 0;
2071
2072 list_for_each_entry(pg, &m->priority_groups, list) {
2073 list_for_each_entry(p, &pg->pgpaths, list) {
5dea271b 2074 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
af4874e0
MS
2075 if (ret)
2076 goto out;
2077 }
2078 }
2079
2080out:
2081 return ret;
2082}
2083
9f54cec5 2084static int pgpath_busy(struct pgpath *pgpath)
f40c67f0
KU
2085{
2086 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2087
52b09914 2088 return blk_lld_busy(q);
f40c67f0
KU
2089}
2090
2091/*
 2092 * We return "busy" only when we can map I/Os but underlying devices
2093 * are busy (so even if we map I/Os now, the I/Os will wait on
2094 * the underlying queue).
2095 * In other words, if we want to kill I/Os or queue them inside us
2096 * due to map unavailability, we don't return "busy". Otherwise,
2097 * dm core won't give us the I/Os and we can't do what we want.
2098 */
2099static int multipath_busy(struct dm_target *ti)
2100{
be7d31cc 2101 bool busy = false, has_active = false;
f40c67f0 2102 struct multipath *m = ti->private;
2da1610a 2103 struct priority_group *pg, *next_pg;
f40c67f0 2104 struct pgpath *pgpath;
f40c67f0 2105
b88efd43
MS
2106 /* pg_init in progress */
2107 if (atomic_read(&m->pg_init_in_progress))
2da1610a
MS
2108 return true;
2109
b88efd43 2110 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
a271a89c
MS
2111 if (!atomic_read(&m->nr_valid_paths)) {
2112 unsigned long flags;
2113 spin_lock_irqsave(&m->lock, flags);
2114 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2115 spin_unlock_irqrestore(&m->lock, flags);
2116 return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2117 }
2118 spin_unlock_irqrestore(&m->lock, flags);
2119 }
b88efd43 2120
f40c67f0 2121 /* Guess which priority_group will be used at next mapping time */
506458ef
WD
2122 pg = READ_ONCE(m->current_pg);
2123 next_pg = READ_ONCE(m->next_pg);
2124 if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2da1610a
MS
2125 pg = next_pg;
2126
2127 if (!pg) {
f40c67f0
KU
2128 /*
2129 * We don't know which pg will be used at next mapping time.
 2da1610a 2130 * We don't call choose_pgpath() here to avoid triggering
f40c67f0
KU
2131 * pg_init just by busy checking.
2132 * So we don't know whether underlying devices we will be using
2133 * at next mapping time are busy or not. Just try mapping.
2134 */
2da1610a
MS
2135 return busy;
2136 }
f40c67f0
KU
2137
2138 /*
 2139 * If there is at least one non-busy active path, the path selector
2140 * will be able to select it. So we consider such a pg as not busy.
2141 */
be7d31cc 2142 busy = true;
2da1610a 2143 list_for_each_entry(pgpath, &pg->pgpaths, list) {
f40c67f0 2144 if (pgpath->is_active) {
be7d31cc 2145 has_active = true;
9f54cec5 2146 if (!pgpath_busy(pgpath)) {
be7d31cc 2147 busy = false;
f40c67f0
KU
2148 break;
2149 }
2150 }
2da1610a 2151 }
f40c67f0 2152
2da1610a 2153 if (!has_active) {
f40c67f0
KU
2154 /*
2155 * No active path in this pg, so this pg won't be used and
2156 * the current_pg will be changed at next mapping time.
2157 * We need to try mapping to determine it.
2158 */
be7d31cc 2159 busy = false;
2da1610a 2160 }
f40c67f0
KU
2161
2162 return busy;
2163}
2164
1da177e4
LT
2165/*-----------------------------------------------------------------
2166 * Module setup
2167 *---------------------------------------------------------------*/
2168static struct target_type multipath_target = {
2169 .name = "multipath",
636be424 2170 .version = {1, 14, 0},
8c5c1473
SM
2171 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2172 DM_TARGET_PASSES_INTEGRITY,
1da177e4
LT
2173 .module = THIS_MODULE,
2174 .ctr = multipath_ctr,
2175 .dtr = multipath_dtr,
e5863d9a
MS
2176 .clone_and_map_rq = multipath_clone_and_map,
2177 .release_clone_rq = multipath_release_clone,
f40c67f0 2178 .rq_end_io = multipath_end_io,
76e33fe4
MS
2179 .map = multipath_map_bio,
2180 .end_io = multipath_end_io_bio,
2181 .presuspend = multipath_presuspend,
2182 .postsuspend = multipath_postsuspend,
2183 .resume = multipath_resume,
2184 .status = multipath_status,
2185 .message = multipath_message,
2186 .prepare_ioctl = multipath_prepare_ioctl,
2187 .iterate_devices = multipath_iterate_devices,
2188 .busy = multipath_busy,
2189};
2190
1da177e4
LT
2191static int __init dm_multipath_init(void)
2192{
2193 int r;
2194
4d4d66ab 2195 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
c557308e 2196 if (!kmultipathd) {
0cd33124 2197 DMERR("failed to create workqueue kmpathd");
ff658e9c
JT
2198 r = -ENOMEM;
2199 goto bad_alloc_kmultipathd;
c557308e
AK
2200 }
2201
bab7cfc7
CS
2202 /*
2203 * A separate workqueue is used to handle the device handlers
 2204 * to avoid overloading the existing workqueue. Overloading the
2205 * old workqueue would also create a bottleneck in the
2206 * path of the storage hardware device activation.
2207 */
4d4d66ab
TH
2208 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2209 WQ_MEM_RECLAIM);
bab7cfc7
CS
2210 if (!kmpath_handlerd) {
2211 DMERR("failed to create workqueue kmpath_handlerd");
ff658e9c
JT
2212 r = -ENOMEM;
2213 goto bad_alloc_kmpath_handlerd;
bab7cfc7
CS
2214 }
2215
7e6358d2 2216 r = dm_register_target(&multipath_target);
2217 if (r < 0) {
2218 DMERR("request-based register failed %d", r);
2219 r = -EINVAL;
2220 goto bad_register_target;
2221 }
2222
ff658e9c
JT
2223 return 0;
2224
7e6358d2 2225bad_register_target:
2226 destroy_workqueue(kmpath_handlerd);
ff658e9c
JT
2227bad_alloc_kmpath_handlerd:
2228 destroy_workqueue(kmultipathd);
2229bad_alloc_kmultipathd:
1da177e4
LT
2230 return r;
2231}
2232
2233static void __exit dm_multipath_exit(void)
2234{
bab7cfc7 2235 destroy_workqueue(kmpath_handlerd);
c557308e
AK
2236 destroy_workqueue(kmultipathd);
2237
10d3bd09 2238 dm_unregister_target(&multipath_target);
1da177e4
LT
2239}
2240
1da177e4
LT
2241module_init(dm_multipath_init);
2242module_exit(dm_multipath_exit);
2243
be240ff5
AP
2244module_param_named(queue_if_no_path_timeout_secs,
2245 queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
2246MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
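
Because the parameter above is declared with S_IRUGO | S_IWUSR, it is expected to be readable and root-writable at runtime via sysfs as well as at load time; assuming the module is built as dm_multipath, a plausible invocation would be:

 echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs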
2247
1da177e4
LT
2248MODULE_DESCRIPTION(DM_NAME " multipath target");
2249MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2250MODULE_LICENSE("GPL");