/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
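	/*
	 * Illustrative example (added, not in the original source): with a
	 * shared bitmap of depth 128 and 3 active queues, each queue gets
	 * max((128 + 2) / 3, 4) = 43 tags before the check below starts
	 * rejecting further allocations from it.
	 */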
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
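	/*
	 * Added note: a non-zero ->shallow_depth is set by I/O schedulers
	 * that want to cap how far certain request classes may dip into the
	 * bitmap; zero means no such restriction.
	 */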
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

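	/*
	 * Note (added for clarity): both bitmaps number their bits from 0,
	 * so tag_offset shifts normal tags up past the reserved range before
	 * the result is handed back at found_tag below.
	 */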
	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		prepare_to_wait_exclusive(&ws->wait, &wait,
						TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		bt_prev = bt;
		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);

		/*
		 * If destination hw queue is changed, fake wake up on
		 * previous queue for compensating the wake up miss, so
		 * other allocations on previous queue won't be starved.
		 */
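		/*
		 * Added note: the hw queue can change when this task wakes up
		 * on a CPU that maps to a different hctx; the wakeup it
		 * consumed on the old queue would otherwise be lost to
		 * waiters still parked there.
		 */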
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

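/*
 * Usage sketch (illustrative, not part of the original file): a driver walks
 * every in-flight request of its tag set with a busy_tag_iter_fn callback,
 * e.g. to complete them during controller teardown. Names below are made up:
 *
 *	static void my_cancel_rq(struct request *rq, void *data, bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_rq, NULL);
 */
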
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
			 int (fn)(void *, struct request *))
{
	int i, j, ret = 0;

	if (WARN_ON_ONCE(!fn))
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = fn(data, tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
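		/*
		 * Added note: assuming the usual BLKDEV_MAX_RQ of 128, this
		 * caps the resized depth at 2048 tags per hardware queue.
		 */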
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
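
/*
 * Illustrative decode (added, not in the original file): drivers recover the
 * two halves with the helpers declared in include/linux/blk-mq.h:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	(hardware queue index)
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	(per-queue tag)
 */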