#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
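
/*
 * Wait until at least one tag is available: allocate a tag with
 * __GFP_WAIT, which blocks until the allocation succeeds, then release
 * it straight away. Callers use this purely for the blocking side
 * effect.
 */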
void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx,
			  bool reserved)
{
	int tag, zero = 0;

	tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved);
	blk_mq_put_tag(tags, tag, &zero);
}
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_mq_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
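
/*
 * Scan a single tag word for a free bit, starting at a cached offset.
 * The search covers [last_tag, depth) first and, on a miss, wraps once
 * to cover [0, last_tag) before giving up; e.g. with depth == 32 and
 * last_tag == 20, bits 20..31 are tried before bits 0..19. If
 * test_and_set_bit_lock() loses the race for a bit that looked free,
 * the scan resumes just past the contested bit.
 */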
static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
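
/*
 * Worked example of the index math in __bt_get(), assuming
 * BITS_PER_LONG == 64 and a map big enough that bits_per_word stays at
 * 6: TAG_TO_INDEX(bt, 70) == 70 >> 6 == 1 and TAG_TO_BIT(bt, 70) ==
 * 70 & 63 == 6, so tag 70 is bit 6 of map[1]. The
 * "tag += (index << bt->bits_per_word)" above is the inverse step,
 * rebuilding the flat tag number from word index and bit offset.
 */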
static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;

	if (!hctx)
		return &bt->bs[0];

	bs = &bt->bs[hctx->wait_index];
	bt_index_inc(&hctx->wait_index);
	return bs;
}
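
/*
 * bt_get() combines the fast and slow paths: one lockless __bt_get()
 * attempt, then, for __GFP_WAIT callers, a prepare_to_wait()/
 * io_schedule() loop on one of the BT_WAIT_QUEUES wait queues.
 * Retrying __bt_get() after prepare_to_wait() closes the race with a
 * tag freed between the first attempt and queueing on the waitqueue.
 */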
static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag, gfp_t gfp)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		io_schedule();
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
				     struct blk_mq_hw_ctx *hctx,
				     unsigned int *last_tag, gfp_t gfp)
{
	int tag;

	tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
	if (tag >= 0)
		return tag + tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag, zero = 0;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags,
			    struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
			    gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(tags, hctx, last_tag, gfp);

	return __blk_mq_get_reserved_tag(tags, gfp);
}
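
/*
 * Wakeup side of the slow path: each freed tag decrements the active
 * wait queue's wait_cnt, and only every bt->wake_cnt frees does
 * bt_clear_tag() issue a wake_up() and rotate wake_index to the next
 * queue. Batching the wakeups keeps the free path cheap, and spreading
 * waiters over several queues keeps each wakeup small.
 */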
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			if (wake_index != bt->wake_index)
				bt->wake_index = wake_index;

			return bs;
		}

		bt_index_inc(&wake_index);
	}

	return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order access to req in the
	 * free path and clearing the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag,
		    unsigned int *last_tag)
{
	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}
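
/*
 * Note the tag space layout implied above: tags [0, nr_reserved_tags)
 * form the reserved pool, and normal tags sit above it (the offset is
 * added in __blk_mq_get_tag() and subtracted again here). Freeing also
 * refreshes *last_tag, so the next allocation from this software queue
 * starts at the just-freed, likely cache-hot tag.
 */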
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_mq_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
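
/*
 * Despite the "busy_iter" name, the bitmap handed to fn() has a bit
 * set for every *free* tag (bt_for_each_free() mirrors the zero bits
 * of the tag words); the callback is expected to treat the bits left
 * clear as the busy tags.
 */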
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_mq_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, i, map_depth, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap),
					GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
		map_depth = depth;
		for (i = 0; i < nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= tags_per_word;
		}
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
	return 0;
}
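
/*
 * Sizing example for bt_alloc() (illustrative numbers): with
 * depth == 48 and BITS_PER_LONG == 64, tags_per_word starts at 64 and
 * the shrink loop halves it to 32, 16 and finally 8 (8 * 4 <= 48),
 * leaving bits_per_word == 3 and nr == ALIGN(48, 8) / 8 == 6 words of
 * 8 tags each, with each word in its own cacheline as described above
 * __bt_get().
 */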
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
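
/*
 * Seeding last_tag randomly instead of at 0 spreads the per-queue
 * cache hints across the map from the start, so different CPUs begin
 * their searches on different words (and hence cachelines).
 */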
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);

	return page - orig_page;
}
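
/*
 * Illustrative sysfs output (values made up): a device with 128 tags,
 * one of them reserved and none in flight, would show something like:
 *
 *	nr_tags=128, reserved_tags=1, bits_per_word=4
 *	nr_free=127, nr_reserved=1
 */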