/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up everyone potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

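/*
 * Editorial worked example (not in the original source): with a shared
 * bitmap of depth 256 and three active queues, the fair share is
 * depth = max(DIV_ROUND_UP(256, 3), 4U) = max(86, 4) = 86, so each
 * submitter may have at most 86 requests in flight. The 4U floor keeps
 * a minimum usable share when many queues contend for a small tag map.
 */
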
#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)

static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
		    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
	unsigned int last_tag;
	int tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = *tag_cache;
	tag = sbitmap_get(&bt->sb, last_tag, BT_ALLOC_RR(tags));

	if (tag == -1) {
		*tag_cache = 0;
	} else if (tag == last_tag || unlikely(BT_ALLOC_RR(tags))) {
		last_tag = tag + 1;
		if (last_tag >= bt->sb.depth - 1)
			last_tag = 0;
		*tag_cache = last_tag;
	}

	return tag;
}

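/*
 * Editorial note: on a hit, the cached hint is advanced to tag + 1 only
 * when the allocation landed exactly on the hint (or always, under the
 * round-robin policy); a hit elsewhere in the map leaves the hint alone,
 * and an allocation failure resets it to 0. This keeps FIFO allocations
 * clustered in the same sbitmap word while RR walks tags in order.
 */
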
static int bt_get(struct blk_mq_alloc_data *data,
		  struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag, tags);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);
		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}

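/*
 * Editorial summary of the wait loop above: each pass (1) re-tries the
 * allocation after registering on the wait queue, closing the race with
 * a concurrent tag release, (2) runs the hardware queue so outstanding
 * requests can complete and free tags, and (3) sleeps in io_schedule().
 * After waking, the ctx/hctx mapping is re-established because the task
 * may have been migrated to a different CPU while asleep.
 */
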
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     &data->ctx->last_tag, data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag);
		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
			*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag);
	}
}

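/*
 * Editorial note: caching the freed tag in *last_tag under the FIFO
 * policy means the next allocation from this software queue starts its
 * sbitmap search at a just-released bit, favoring reuse of cache-hot
 * tags.
 */
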
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	if (rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

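/*
 * Editorial note: the rq->q == hctx->queue check matters when a tag set
 * is shared by several request queues; the iterator must only invoke the
 * callback for requests that belong to the queue this hctx serves.
 */
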
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	iter_data->fn(rq, iter_data->data, reserved);
	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

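/*
 * Editorial usage sketch for blk_mq_tagset_busy_iter() (hypothetical
 * driver code, not part of this file): count in-flight requests across
 * an entire tag set.
 *
 *	static void count_busy(struct request *rq, void *data, bool reserved)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int busy = 0;
 *	blk_mq_tagset_busy_iter(&driver->tag_set, count_busy, &busy);
 */
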
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check.
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, GFP_KERNEL, node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	tags->alloc_policy = alloc_policy;

	if (bt_alloc(&tags->bitmap_tags, depth, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
		kfree(tags);
		return NULL;
	}

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

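/*
 * Editorial usage sketch (hypothetical values): a driver with a hardware
 * queue depth of 64 and one reserved internal command could call
 *
 *	tags = blk_mq_init_tags(64, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *
 * Normal requests then allocate from a 63-tag bitmap, and the single
 * reserved tag is handed out only for BLK_MQ_REQ_RESERVED allocations.
 */
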
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	free_cpumask_var(tags->cpumask);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

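/*
 * Editorial worked example: with BLK_MQ_UNIQUE_TAG_BITS == 16 and
 * BLK_MQ_UNIQUE_TAG_MASK == 0xffff, tag 5 on hardware queue 2 encodes as
 * (2 << 16) | 5 == 0x00020005. The two halves can be recovered with
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag().
 */
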
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}
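
/*
 * Editorial example of the sysfs output produced above (values are
 * illustrative only):
 *
 *	nr_tags=128, reserved_tags=1, bits_per_word=32
 *	nr_free=96, nr_reserved=1
 *	active_queues=2
 */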