/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * The number of IOs reserved in request-based DM's mempools, tunable by
 * the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
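
/*
 * Usage sketch (assumption -- the call site lives outside this file, in
 * dm.c): the accessor above is expected to size the request-based
 * mempools when a mapped device is created, e.g.:
 *
 *	pool_size = dm_get_reserved_rq_based_ios();
 */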

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}
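
/*
 * Usage note (assumption -- the call sites live in dm.c): the suspend path
 * is expected to quiesce dispatch with dm_stop_queue(), and the resume
 * path to restart it with dm_start_queue(), which also kicks any requests
 * parked on the requeue list.
 */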

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the completed bytes.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 * The error path above jumps straight to the label below so that
	 * both paths share this final update.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put()
 * later.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
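
/*
 * Illustrative sketch, not part of this file: the shape of a target's
 * rq_end_io hook as dm_done() invokes it above. The function name and the
 * retry policy are hypothetical; a real target (e.g. dm-multipath)
 * decides here whether to fail, retry, or finish the I/O.
 *
 *	static int example_rq_end_io(struct dm_target *ti, struct request *clone,
 *				     blk_status_t error, union map_info *map_context)
 *	{
 *		if (error == BLK_STS_RESOURCE)
 *			return DM_ENDIO_DELAY_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */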

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error
 * status through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
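
/*
 * Illustrative sketch, not part of this file: a minimal clone_and_map_rq
 * hook as map_request() calls it above, loosely modeled on dm-multipath.
 * pick_underlying_queue() is hypothetical; a real target selects the
 * underlying device itself.
 *
 *	static int example_clone_and_map(struct dm_target *ti, struct request *rq,
 *					 union map_info *map_context,
 *					 struct request **__clone)
 *	{
 *		struct request_queue *q = pick_underlying_queue(ti);
 *		struct request *clone;
 *
 *		clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
 *					BLK_MQ_REQ_NOWAIT);
 *		if (IS_ERR(clone))
 *			return DM_MAPIO_DELAY_REQUEUE;
 *		clone->bio = clone->biotail = NULL;
 *		*__clone = clone;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */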

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold an md reference for this in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed while its requests are still
	 * completing.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
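
/*
 * Usage note (assumption -- the call sites live in dm.c):
 * dm_setup_md_queue() is expected to call dm_mq_init_request_queue() for
 * request-based table types, and dm_mq_cleanup_mapped_device() below on
 * device teardown.
 */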

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
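
/*
 * Tuning sketch (assuming dm-rq is built into dm_mod): these parameters
 * should then appear under /sys/module/dm_mod/parameters/, e.g.:
 *
 *	echo 4096 > /sys/module/dm_mod/parameters/dm_mq_queue_depth
 *	echo 4    > /sys/module/dm_mod/parameters/dm_mq_nr_hw_queues
 *
 * New values take effect when the next request queue is created, clamped
 * by dm_get_blk_mq_queue_depth() and dm_get_blk_mq_nr_hw_queues() above.
 */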