// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

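/*
 * Clear BLK_MQ_S_SCHED_RESTART and kick the hardware queue so that requests
 * held back in the scheduler or on hctx->dispatch get another chance to be
 * dispatched.
 */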
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
	 * in blk_mq_run_hw_queue(). Its pair is the barrier in
	 * blk_mq_dispatch_rq_list(). Without it, the dispatch code could fail
	 * to see SCHED_RESTART while a new request added to hctx->dispatch is
	 * missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

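/*
 * list_sort() comparison callback: order requests by their hardware queue so
 * that requests targeting the same hctx end up adjacent in the list.
 */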
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

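/*
 * Split off the run of requests at the head of @rq_list that belong to the
 * same hardware queue and dispatch them to that queue as one batch.
 */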
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, false);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates a false impression for
		 * scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx and
		 * dispatch batches of requests from the same hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, false);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

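/*
 * Keep dispatching from the scheduler until the inner helper reports there is
 * nothing more to do. Give up after roughly one second (HZ jiffies), or when
 * a reschedule is pending, and defer further dispatch to a queue run instead
 * of hogging the CPU.
 */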
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

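/*
 * Return the software queue that follows @ctx on @hctx, wrapping around to
 * the first one. Used for round-robin dispatch from the software queues.
 */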
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

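/*
 * Dispatch leftovers from hctx->dispatch first, then pull new requests from
 * the elevator or the software queues. Returns -EAGAIN if hctx->dispatch was
 * found non-empty, in which case the caller must dispatch again to avoid
 * starving flushes.
 */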
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, true))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, true);
	return 0;
}

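/*
 * Main dispatch entry point for a hardware queue. The caller must hold the
 * RCU or SRCU read lock that protects the queue, since the quiesced flag is
 * checked below.
 */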
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

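/*
 * Try to merge @bio into an already queued request: ask the elevator first if
 * it implements ->bio_merge(), otherwise scan the software queue the bio maps
 * to for a merge candidate.
 */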
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(bio->bi_opf, ctx);
	type = hctx->type;
	if (list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

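/*
 * Allocate scheduler tags (and the requests backing them) for one hardware
 * queue, or reuse the queue-wide shared scheduler tags when the tag set
 * shares tags across hardware queues.
 */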
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

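/*
 * Allocate the queue-wide scheduler tags used when the tag set shares tags
 * across hardware queues.
 */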
static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

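/*
 * Register the elevator's debugfs attributes for the queue and for each
 * hardware queue; blk_mq_sched_unreg_debugfs() undoes this. Both helpers take
 * q->debugfs_mutex themselves.
 */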
void blk_mq_sched_reg_debugfs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	mutex_unlock(&q->debugfs_mutex);
}

void blk_mq_sched_unreg_debugfs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->debugfs_mutex);
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_sched_hctx(hctx);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to twice the smaller of the hardware queue depth and
	 * BLKDEV_DEFAULT_RQ (128), since we don't split into sync/async like
	 * the old code did. Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
	}
	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

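/*
 * Tear down the elevator: run the per-hctx and per-queue exit hooks, free the
 * scheduler tags, and mark the elevator as dying before detaching it from the
 * queue.
 */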
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags);
	q->elevator = NULL;
}