/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);
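
/*
 * CONFIG_DM_MQ_DEFAULT only seeds the compile-time default above; the
 * use_blk_mq module parameter registered at the bottom of this file can
 * override it at runtime.  Assuming request-based DM is built into dm_mod,
 * something like the following would flip the default for newly created
 * devices:
 *
 *      echo Y > /sys/module/dm_mod/parameters/use_blk_mq
 */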

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
                blk_mq_start_stopped_hw_queues(q, true);
                blk_mq_kick_requeue_list(q);
        }
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q)) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return;
        }

        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else {
                spin_lock_irq(q->queue_lock);
                queue_flag_set(QUEUE_FLAG_STOPPED, q);
                spin_unlock_irq(q->queue_lock);

                blk_mq_cancel_requeue_work(q);
                blk_mq_stop_hw_queues(q);
        }
}
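
/*
 * For blk-mq queues, dm_stop_queue() sets QUEUE_FLAG_STOPPED and cancels the
 * requeue work before stopping the hardware queues: dm_mq_requeue_request()
 * checks that flag so the requeue list is not kicked while the queue is
 * stopped, and dm_mq_queue_rq() additionally checks BLK_MQ_S_STOPPED to
 * reject requests that race in during suspend.
 */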

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
                                                gfp_t gfp_mask)
{
        return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
        mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something is wrong.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!md->queue->mq_ops && run_queue)
                blk_run_queue_async(md->queue);

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

        /*
         * It is possible for a clone_old_rq() allocated clone to
         * get passed in -- it may not yet have a request_queue.
         * This is known to occur if the error target replaces
         * a multipath target that has a request_fn queue stacked
         * on blk-mq queue(s).
         */
        if (clone->q && clone->q->mq_ops)
                /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_old_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                if (rq->sense)
                        /*
                         * We are using the sense buffer of the original
                         * request.
                         * So setting the length of the sense data is enough.
                         */
                        rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!rq->q->mq_ops) {
                rq->special = NULL;
                rq->cmd_flags &= ~REQ_DONTPREP;
        }

        if (clone)
                free_rq_clone(clone);
        else if (!tio->md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        blk_mq_requeue_request(rq);
        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_mq_kick_requeue_list(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_original_request(struct mapped_device *md,
                                        struct request *rq)
{
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        dm_unprep_request(rq);

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_requeue_request(rq);

        rq_completed(md, rw, false);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio->md, tio->orig);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}
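
/*
 * To summarise the rq_end_io() handling above: r <= 0 completes the original
 * request with that error, DM_ENDIO_INCOMPLETE leaves the I/O for the target
 * to finish, DM_ENDIO_REQUEUE requeues the original request, and any other
 * value is treated as a target bug.
 */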

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                rq_end_stats(tio->md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops) {
                        blk_end_request_all(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                        free_old_rq_tio(tio);
                } else {
                        blk_mq_end_request(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                }
                return;
        }

        if (rq->cmd_flags & REQ_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (!clone->q->mq_ops) {
                /*
                 * Just for cleaning up the bookkeeping of the queue in which
                 * the clone was dispatched.
                 * The clone is *NOT* actually freed here because it was
                 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
                 */
                __blk_put_request(clone->q, clone);
        }

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, a deadlock could occur because:
         * - another request may be submitted by the upper level driver
         *   of the stack during the completion
         * - that submission, which requires the queue lock, may be done
         *   against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->cmd_flags |= REQ_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->cmd = rq->cmd;
        clone->cmd_len = rq->cmd_len;
        clone->sense = rq->sense;
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        /*
         * Create clone for use with .request_fn request_queue
         */
        struct request *clone;

        clone = alloc_old_clone_request(md, gfp_mask);
        if (!clone)
                return NULL;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
                free_old_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                init_kthread_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
                                               struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

        tio = alloc_old_rq_tio(md, gfp_mask);
        if (!tio)
                return NULL;

        init_tio(tio, rq, md);

        table = dm_get_live_table(md, &srcu_idx);
        /*
         * Must clone a request if this .request_fn DM device
         * is stacked on .request_fn device(s).
         */
        if (!dm_table_all_blk_mq_devices(table)) {
                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                        dm_put_live_table(md, srcu_idx);
                        free_old_rq_tio(tio);
                        return NULL;
                }
        }
        dm_put_live_table(md, srcu_idx);

        return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
        if (!tio)
                return BLKPREP_DEFER;

        rq->special = tio;
        rq->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}
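
/*
 * dm_old_prep_fn() uses the standard block layer prep return codes:
 * BLKPREP_OK when the tio has been set up and the request may be dispatched,
 * BLKPREP_DEFER when tio allocation failed and prep should be retried later,
 * and BLKPREP_KILL when the request already carries rq->special and must be
 * failed.
 */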

/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                       struct mapped_device *md)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r != DM_MAPIO_REMAPPED)
                        return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(md, tio->orig);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
                return r;
        }

        return 0;
}
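
/*
 * The target's map hook drives the switch above: DM_MAPIO_SUBMITTED means
 * the target queued the clone itself, DM_MAPIO_REMAPPED means the clone is
 * ready to be dispatched via dm_dispatch_clone_request(), and
 * DM_MAPIO_REQUEUE pushes the original request back for a later retry; any
 * other positive value is a target bug, and any negative value kills the
 * unmapped request.
 */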

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count taken by the device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
        struct request *rq = tio->orig;
        struct mapped_device *md = tio->md;

        if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(md, rq);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}
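
/*
 * The deadline is exposed through the mapped device's "dm" sysfs directory
 * (the attribute is wired up in dm-sysfs.c).  Assuming the device is dm-0,
 * something like:
 *
 *      echo 8000 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 *
 * sets an 8 ms merge deadline; values are clamped to
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS, and 0 disables the heuristic.
 */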

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock so that the number of
         * in-flight I/Os is not incremented after the queue is stopped
         * in dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                queue_kthread_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
        /* Fully initialize the queue */
        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);

        /* Initialize the request-based DM worker thread */
        init_kthread_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task))
                return PTR_ERR(md->kworker_task);

        elv_register_queue(md->queue);

        return 0;
}

static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        struct mapped_device *md = data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * On suspend dm_stop_queue() handles stopping the blk-mq
         * request_queue BUT: even though the hw_queues are marked
         * BLK_MQ_S_STOPPED at that point there is still a race that
         * allows block/blk-mq.c to call ->queue_rq against a
         * hctx that it really shouldn't. The following check guards
         * against this rarity (albeit _not_ race-free).
         */
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return BLK_MQ_RQ_QUEUE_BUSY;

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .map_queue = blk_mq_map_queue,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};
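
/*
 * dm_softirq_done() serves double duty: it is the blk-mq .complete callback
 * above and also the softirq completion handler for the legacy .request_fn
 * path, which reaches it through blk_complete_request() in
 * dm_complete_request().
 */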

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_dev(disk_to_dev(md->disk), q);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
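
/*
 * All of the parameters above can also be set at module load or boot time.
 * Assuming request-based DM is built into dm_mod, for example:
 *
 *      modprobe dm_mod dm_mq_nr_hw_queues=4 dm_mq_queue_depth=2048
 *      # or on the kernel command line: dm_mod.use_blk_mq=Y
 */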