blk-mq: Avoid that __bt_get_word() wraps multiple times
[linux-2.6-block.git] block/blk-mq-tag.c
/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
        return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
        int old = atomic_read(index);
        int new = bt_index_inc(old);
        atomic_cmpxchg(index, old, new);
}
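
/*
 * Illustrative annotation, assuming BT_WAIT_QUEUES keeps its value of 8
 * from blk-mq-tag.h: because the count is a power of two, the mask wraps
 * the index without a division, e.g. bt_index_inc(7) == (7 + 1) & 7 == 0.
 * The single cmpxchg in bt_index_atomic_inc() may lose a race with a
 * concurrent update; that is tolerated because the index is only a
 * round-robin hint, so a skipped increment is harmless.
 */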

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}
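
/*
 * Illustrative annotation: the unlocked test_bit() fast path above skips
 * the locked read-modify-write of test_and_set_bit() (and the cacheline
 * dirtying it implies) once the queue is already marked active, which
 * matters on hot submission paths.
 */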

/*
 * Wake up everybody potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;

        bt = &tags->bitmap_tags;
        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);

                wake_index = bt_index_inc(wake_index);
        }
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_bitmap_tags *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
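
/*
 * Worked example (illustrative, not from the original source): with
 * bt->depth == 128 and users == 3, depth = max((128 + 2) / 3, 4U) == 43,
 * so each active queue sharing the map may have at most 43 tags in
 * flight. The ceiling division makes the per-queue shares cover the
 * whole map, and the 4U floor guarantees forward progress even with
 * many active queues.
 */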

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
        int tag, org_last_tag, end;
        bool wrap = last_tag != 0;

        org_last_tag = last_tag;
        end = bm->depth;
        do {
restart:
                tag = find_next_zero_bit(&bm->word, end, last_tag);
                if (unlikely(tag >= end)) {
                        /*
                         * We started with an offset, start from 0 to
                         * exhaust the map.
                         */
                        if (wrap) {
                                wrap = false;
                                end = org_last_tag;
                                last_tag = 0;
                                goto restart;
                        }
                        return -1;
                }
                last_tag = tag + 1;
        } while (test_and_set_bit_lock(tag, &bm->word));

        return tag;
}
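
/*
 * Illustrative walk-through of the single-wrap guarantee (the point of
 * this patch): with bm->depth == 32 and last_tag == 20, the first
 * find_next_zero_bit() scans bits [20, 32). On failure, wrap is
 * consumed, end is clamped to the original offset and bits [0, 20) are
 * scanned exactly once; a second failure returns -1 instead of
 * rescanning, so each bit is visited at most once even if
 * test_and_set_bit_lock() repeatedly loses races.
 */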

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                    unsigned int *tag_cache)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
                        goto done;
                }

                last_tag = 0;
                if (++index >= bt->map_nr)
                        index = 0;
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}
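
/*
 * Illustrative decomposition, assuming TAG_TO_INDEX() and TAG_TO_BIT()
 * remain the shift/mask helpers from blk-mq-tag.h: with
 * bits_per_word == 6 (64 tags per word), tag 70 lives in map[1] at
 * bit 6, and the reverse mapping used above is
 * tag = (index << bits_per_word) + bit == (1 << 6) + 6 == 70.
 */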

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;
        int wait_index;

        if (!hctx)
                return &bt->bs[0];

        wait_index = atomic_read(&hctx->wait_index);
        bs = &bt->bs[wait_index];
        bt_index_atomic_inc(&hctx->wait_index);
        return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
                  struct blk_mq_bitmap_tags *bt,
                  struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag);
        if (tag != -1)
                return tag;

        if (!(data->gfp & __GFP_WAIT))
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;

                blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = data->q->mq_ops->map_queue(data->q,
                                data->ctx->cpu);
                if (data->reserved) {
                        bt = &data->hctx->tags->breserved_tags;
                } else {
                        last_tag = &data->ctx->last_tag;
                        hctx = data->hctx;
                        bt = &hctx->tags->bitmap_tags;
                }
                finish_wait(&bs->wait, &wait);
                bs = bt_wait_ptr(bt, hctx);
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}
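
/*
 * Illustrative annotation: after io_schedule() the task may resume on a
 * different CPU, so the loop re-resolves the software queue (ctx), the
 * hardware queue (hctx) and the bitmap before retrying, and re-selects
 * the wait queue via bt_wait_ptr(), since all of them may now refer to
 * different structures.
 */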
295
cb96a42c 296static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
4bb659b1
JA
297{
298 int tag;
299
cb96a42c
ML
300 tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
301 &data->ctx->last_tag);
4bb659b1 302 if (tag >= 0)
cb96a42c 303 return tag + data->hctx->tags->nr_reserved_tags;
4bb659b1
JA
304
305 return BLK_MQ_TAG_FAIL;
320ae51f
JA
306}
307
cb96a42c 308static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
320ae51f 309{
4bb659b1 310 int tag, zero = 0;
320ae51f 311
cb96a42c 312 if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
320ae51f
JA
313 WARN_ON_ONCE(1);
314 return BLK_MQ_TAG_FAIL;
315 }
316
cb96a42c 317 tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
320ae51f
JA
318 if (tag < 0)
319 return BLK_MQ_TAG_FAIL;
4bb659b1 320
320ae51f
JA
321 return tag;
322}
323
cb96a42c 324unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
320ae51f 325{
cb96a42c
ML
326 if (!data->reserved)
327 return __blk_mq_get_tag(data);
320ae51f 328
cb96a42c 329 return __blk_mq_get_reserved_tag(data);
320ae51f
JA
330}

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        int o = atomic_read(&bt->wake_index);
                        if (wake_index != o)
                                atomic_cmpxchg(&bt->wake_index, o, wake_index);

                        return bs;
                }

                wake_index = bt_index_inc(wake_index);
        }

        return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;
        int wait_cnt;

        /*
         * The unlock memory barrier needs to order access to req in the
         * free path and the clearing of the tag bit.
         */
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (!bs)
                return;

        wait_cnt = atomic_dec_return(&bs->wait_cnt);
        if (unlikely(wait_cnt < 0))
                wait_cnt = atomic_inc_return(&bs->wait_cnt);
        if (wait_cnt == 0) {
                atomic_add(bt->wake_cnt, &bs->wait_cnt);
                bt_index_atomic_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}
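
/*
 * Illustrative batching example, assuming bt->wake_cnt == 8: only every
 * 8th freed tag drives bs->wait_cnt to zero and issues a wake_up(), so
 * sleepers are woken once per batch of completions rather than once per
 * tag. The atomic_inc_return() above repairs the counter if concurrent
 * frees race it below zero.
 */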

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                bt_clear_tag(&tags->bitmap_tags, real_tag);
                *last_tag = real_tag;
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                bt_clear_tag(&tags->breserved_tags, tag);
        }
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_bitmap_tags *bt, unsigned int off,
                busy_iter_fn *fn, void *data, bool reserved)
{
        struct request *rq;
        int bit, i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                for (bit = find_first_bit(&bm->word, bm->depth);
                     bit < bm->depth;
                     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
                        rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
                        if (rq->q == hctx->queue)
                                fn(hctx, rq, data, reserved);
                }

                off += (1 << bt->bits_per_word);
        }
}

void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tags->nr_reserved_tags)
                bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
        bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                        false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                            unsigned int depth)
{
        unsigned int tags_per_word = 1U << bt->bits_per_word;
        unsigned int map_depth = depth;

        if (depth) {
                int i;

                for (i = 0; i < bt->map_nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= bt->map[i].depth;
                }
        }

        bt->wake_cnt = BT_WAIT_BATCH;
        if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
                bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

        bt->depth = depth;
}
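
/*
 * Worked example (illustrative): with bits_per_word == 4
 * (tags_per_word == 16) and depth == 70, the per-word depths become
 * 16, 16, 16, 16 and 6. Assuming BT_WAIT_BATCH and BT_WAIT_QUEUES are
 * both 8, wake_cnt starts at 8 and is only lowered when depth / 8
 * falls below that, with a floor of 1.
 */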

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                        int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * If less than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }

                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
                                                GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                return -ENOMEM;
        }

        bt_update_count(bt, depth);

        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                init_waitqueue_head(&bt->bs[i].wait);
                atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
        }

        return 0;
}
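
/*
 * Sizing example (illustrative, assuming BITS_PER_LONG == 64): for
 * depth == 16 the shrink loop above reduces bits_per_word from 6 to 2,
 * i.e. tags_per_word == 4, so nr == 4 and the 16 tags are spread across
 * four separate blk_align_bitmap cachelines instead of sharing one word.
 */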

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}
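
/*
 * Illustrative annotation: seeding each software queue's last_tag with a
 * random offset spreads initial allocations across the bitmap words,
 * preserving the per-cacheline separation described above from the very
 * first allocation.
 */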

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
        tdepth -= tags->nr_reserved_tags;
        if (tdepth > tags->nr_tags)
                return -EINVAL;

        /*
         * Don't need (or can't) update reserved tags here, they remain
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
        blk_mq_tag_wakeup_all(tags);
        return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

        return page - orig_page;
}