/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

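/*
 * Free the scheduler-private per-hctx data of every hardware queue on @q,
 * calling @exit first on each hctx that still has sched_data attached so the
 * scheduler can tear down its own state.
 */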
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

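/*
 * Look up (or create) the io_cq for the submitting task's io_context on this
 * queue and attach it to @rq for the elevator, taking a reference on the
 * io_context.
 */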
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

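/*
 * Clear a previously set restart marker on @hctx and rerun the hardware
 * queue if it still has pending work.
 */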
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx)) {
		blk_mq_run_hw_queue(hctx, true);
		return;
	}
}

/* Return true if the hctx needs to be run again. */
static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;
		blk_status_t ret;

		if (e->type->ops.mq.has_work &&
		    !e->type->ops.mq.has_work(hctx))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			return true;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	return false;
}

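/*
 * Return the software queue that follows @ctx in this hctx's ctxs[] array,
 * wrapping around at nr_ctx. Used to round-robin dispatch across sw queues.
 */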
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/* Return true if the hctx needs to be run again. */
static bool blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;
		blk_status_t ret;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			return true;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* Round-robin for fair dispatch. */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);

	return false;
}

/* Return true if the hardware queue needs to be run again. */
bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);
	bool run_queue = false;

	/* RCU or SRCU read lock is needed before checking the quiesced flag. */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return false;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				run_queue = blk_mq_do_dispatch_sched(hctx);
			else
				run_queue = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		run_queue = blk_mq_do_dispatch_sched(hctx);
	} else if (q->mq_ops->get_budget) {
		/*
		 * If we need to get a budget before queuing a request,
		 * dequeue requests one by one from the sw queue, to avoid
		 * messing up I/O merging when dispatch runs out of resources.
		 *
		 * TODO: get more budgets and dequeue more requests at a time.
		 */
		run_queue = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}

	if (run_queue && !blk_mq_sched_needs_restart(hctx) &&
	    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		return true;
	}

	return false;
}

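/*
 * Ask the elevator core whether @bio can be merged into an existing request
 * on @q. On a successful back or front merge, also try to merge the grown
 * request with its neighbour; the request left over by that second merge,
 * if any, is returned through @merged_request so the caller can free it.
 * If no second merge happens, the scheduler is notified via
 * elv_merged_request().
 */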
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

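/*
 * Try to merge @bio before allocating a new request: prefer the elevator's
 * ->bio_merge() hook when one is set, otherwise fall back to scanning the
 * current CPU's software queue if BLK_MQ_F_SHOULD_MERGE is enabled.
 */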
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

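/*
 * Opportunistic merge at insert time: returns true if @rq was merged into a
 * request the elevator already holds, in which case @rq need not be inserted.
 */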
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

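/* Tracepoint helper for schedulers to call after queuing a request internally. */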
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

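/*
 * Requests that already carry a driver tag (flushes) go straight onto the
 * hctx dispatch list and return true; everything else is marked RQF_SORTED
 * and returns false so the caller inserts it through the scheduler.
 */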
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

/*
 * Add a flush/fua request to the queue. If we fail to get a driver tag,
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}

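/*
 * Insert a single request. Flushes without a driver tag are routed through
 * the flush machinery; requests that already own a tag bypass the scheduler
 * onto the dispatch list; everything else goes through the elevator's
 * ->insert_requests() hook, or the plain software queue when no elevator is
 * attached. The hardware queue is kicked afterwards if @run_queue is set.
 */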
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

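/*
 * Insert a list of requests belonging to software queue @ctx, handing them
 * to the elevator's ->insert_requests() when one is active, then run the
 * hardware queue.
 */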
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

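/* Free the scheduler tag map, and the requests backing it, for one hardware queue. */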
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

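/*
 * Allocate a scheduler tag map and its requests for one hardware queue,
 * sized by q->nr_requests. On failure everything is freed again and an
 * error is returned.
 */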
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

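/* Release the scheduler tags of every hardware queue on @q. */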
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

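/*
 * Set up scheduler state for a hardware queue created after the elevator was
 * attached: allocate sched tags, run the elevator's ->init_hctx() hook, and
 * register the debugfs attributes.
 */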
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

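/*
 * Tear down scheduler state for a hardware queue that is going away:
 * unregister debugfs, run the elevator's ->exit_hctx() hook, and free the
 * sched tags.
 */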
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

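/*
 * Attach elevator type @e to @q: pick a default nr_requests, allocate
 * per-hctx scheduler tags, then let the elevator initialise itself and each
 * hardware queue. Passing e == NULL switches the queue to "none".
 *
 * The hooks consumed here live in the elevator's ops.mq table. As a rough,
 * illustrative sketch of how an mq-aware scheduler wires itself up (field
 * and function names are indicative only, modelled on mq-deadline):
 *
 *	static struct elevator_type my_sched = {
 *		.ops.mq = {
 *			.init_sched		= my_init_sched,
 *			.exit_sched		= my_exit_sched,
 *			.init_hctx		= my_init_hctx,
 *			.exit_hctx		= my_exit_hctx,
 *			.insert_requests	= my_insert_requests,
 *			.dispatch_request	= my_dispatch_request,
 *			.has_work		= my_has_work,
 *			.bio_merge		= my_bio_merge,
 *		},
 *		.uses_mq	= true,
 *		.elevator_name	= "my-sched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 * and is registered at module init time with elv_register(&my_sched).
 */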
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

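/*
 * Detach the elevator from @q: exit per-hctx scheduler state, unregister
 * debugfs entries, let the elevator tear down its queue-wide data, and free
 * the scheduler tags.
 */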
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

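/*
 * Select and initialise the default I/O scheduler for a new blk-mq queue;
 * elevator_init(q, NULL) picks the default, under q->sysfs_lock.
 */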
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}