/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}
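
/*
 * Added note: the masking above relies on BT_WAIT_QUEUES being a power
 * of two, so (index + 1) & (BT_WAIT_QUEUES - 1) wraps the index without
 * a division. With BT_WAIT_QUEUES == 8, for example, index 7 wraps back
 * to 0.
 */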

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	/*
	 * Make sure all changes prior to this are visible from other CPUs.
	 */
	smp_mb();
	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
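/*
 * Illustrative numbers (not from the original source): with bt->depth ==
 * 128 and 3 active shared users, each hctx may consume up to
 * max((128 + 3 - 1) / 3, 4U) == 43 tags before hctx_may_queue() makes
 * it back off.
 */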
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
			 bool nowrap)
{
	int tag, org_last_tag = last_tag;

	while (1) {
		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
		if (unlikely(tag >= bm->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag && !nowrap) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(tag, &bm->word))
			break;

		last_tag = tag + 1;
		if (last_tag >= bm->depth - 1)
			last_tag = 0;
	}

	return tag;
}

#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
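
/*
 * Added note on the two allocation policies: with BLK_TAG_ALLOC_RR,
 * __bt_get_word() is told not to wrap (nowrap == true) and the per-cpu
 * tag cache in __bt_get() is always advanced past the tag just
 * allocated, so allocation proceeds round-robin through the tag space.
 * The BLK_TAG_ALLOC_FIFO default instead favors reusing low/recently
 * freed tags (blk_mq_put_tag() feeds the freed tag back into the cache).
 */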

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
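/*
 * Added note: the tag-to-word mapping used below comes from the
 * TAG_TO_INDEX() and TAG_TO_BIT() macros (see blk-mq-tag.h), which split
 * a tag into a word index (the high bits, tag >> bt->bits_per_word) and
 * a bit offset within that word (the low bits_per_word bits).
 */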
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
				    BT_ALLOC_RR(tags));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		/*
		 * Jump to next index, and reset the last tag to be the
		 * first tag of that index
		 */
		index++;
		last_tag = (index << bt->bits_per_word);

		if (index >= bt->map_nr) {
			index = 0;
			last_tag = 0;
		}
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}
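
/*
 * Added note: bt_wait_ptr() hands out the BT_WAIT_QUEUES wait queues
 * round-robin via hctx->wait_index, so sleepers are spread over several
 * queues and a batched wakeup (see bt_clear_tag()) only disturbs one of
 * them at a time. This is the "rolling wakeups" scheme mentioned in the
 * header comment.
 */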

static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag, struct blk_mq_tags *tags)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag, tags);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     &data->ctx->last_tag, data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}
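
/*
 * Added note on the tag space layout implemented above: reserved tags
 * occupy values [0, nr_reserved_tags), while normal tags come from the
 * main bitmap and are offset by nr_reserved_tags before being returned
 * to the caller. blk_mq_put_tag() undoes that offset on free.
 */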

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

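/*
 * Added note: freeing a tag wakes sleepers in batches. Each
 * bt_wait_state has a wait_cnt that is decremented per freed tag, and
 * only when it hits zero do we recharge it with bt->wake_cnt, advance
 * wake_index, and wake that queue. This avoids a thundering herd on
 * every single free.
 */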
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
			*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = hctx->tags->rqs[off + bit];
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

static void bt_tags_for_each(struct blk_mq_tags *tags,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	if (!tags->rqs)
		return;
	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = tags->rqs[off + bit];
			fn(rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
	}
}

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

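/*
 * Added note: besides distributing the per-word depths, bt_update_count()
 * sizes the wakeup batch. wake_cnt is BT_WAIT_BATCH, capped at
 * depth / BT_WAIT_QUEUES with a floor of 1, so shallow maps never wait
 * for more frees than can actually be outstanding before waking anyone.
 */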
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}

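/*
 * Worked example (illustrative, assuming a 64-bit build where
 * ilog2(BITS_PER_LONG) == 6): for depth == 32, the shrink loop in
 * bt_alloc() reduces bits_per_word from 6 down to 3 (8 tags per word),
 * so the 32 tags are spread across 4 cacheline-aligned words instead of
 * all sharing one.
 */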
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		bt->map = NULL;
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	tags->alloc_policy = alloc_policy;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
		kfree(tags);
		return NULL;
	}

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	free_cpumask_var(tags->cpumask);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
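
/*
 * Added note: consumers can split a unique tag back into its parts with
 * the blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() helpers
 * declared alongside this interface in <linux/blk-mq.h>.
 */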

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}