// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned int n_sectors;
	unsigned int completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Number of reserved IOs in request-based DM's mempools, settable by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

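/*
 * Module parameter accessors: __dm_get_module_param() substitutes the
 * default when the user sets 0 and clamps values above the given maximum.
 */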
unsigned int dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}

static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned int dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

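/* Request-based DM is the only DM mode backed by blk-mq, so a blk-mq managed queue identifies it. */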
int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

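/*
 * Queue start/stop, used by DM suspend/resume: stopping quiesces blk-mq
 * dispatch; starting unquiesces it and kicks any requeued requests.
 */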
void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

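/* The tio lives in the request's blk-mq PDU area (see md->tag_set->cmd_size). */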
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

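/* Account a finished request in dm-stats, if statistics are enabled. */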
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without the clone's queue lock held;
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

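/* Run the queue's requeue list, optionally delayed by 'msecs' milliseconds. */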
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

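/*
 * Release any clone state and hand the original request back to blk-mq
 * for requeue, after a 100ms delay if delay_requeue is set.
 */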
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

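/*
 * Determine how a completed clone finishes: consult the target's rq_end_io
 * hook (for mapped requests), then end, requeue (possibly delayed), or
 * leave the I/O to the target.
 */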
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMCRIT("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the unmapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

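/* end_io callback of the clone: propagate completion to the original request. */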
static enum rq_end_io_ret end_clone_request(struct request *clone,
					    blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
	return RQ_END_IO_NONE;
}

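/*
 * Called by blk_rq_prep_clone() for each cloned bio: record the original
 * bio and the tio so end_clone_bio() can do partial-completion accounting.
 */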
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

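/* Clone the original request's bios into 'clone' and hook up the completion callbacks. */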
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

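/* Reset the per-request tio state before mapping a new request. */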
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

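/*
 * Mark the original request as started, begin dm-stats accounting and
 * take an md reference for the in-flight I/O (see dm_get() below).
 */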
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count taken by the device opener,
	 * because the device may be closed during request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

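/* blk-mq .init_request callback: runs once per preallocated request when the tag set is created. */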
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize the md member of the tio, otherwise it won't
	 * be available in dm_mq_queue_rq().
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

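/* blk-mq .queue_rq callback: map one request and dispatch its clone. */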
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * A blk-mq unquiesce may be triggered by outside events, such as
	 * an elevator switch or an nr_requests update, so a request may
	 * arrive during suspend; simply ask blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/* Establish tio->ti before calling map_request(). */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

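/* Allocate and set up the blk-mq tag set, then initialize md->queue for request-based DM. */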
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

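/* Tear down the tag set created by dm_mq_init_request_queue(), if any. */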
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

module_param(reserved_rq_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, 0644);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, 0644);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");