/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

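/*
 * Check whether the regular (non-reserved) tag pool has any free bits left.
 * A NULL @tags means the hardware queue does no tag management at all, so
 * report it as having free tags.
 */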
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all waiters that may be sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Give each active queue roughly an equal share of the tag depth,
         * rounded up, but allow at least 4 tags so a heavily shared map
         * still lets each queue make forward progress. E.g. a depth of 256
         * shared by 3 active queues allows up to 86 tags per queue.
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

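/*
 * Try to grab a tag from @bt without sleeping. Returns the bit number on
 * success, or -1 if this hctx is over its fair share or no bit is currently
 * free. A non-zero shallow_depth in @data further restricts how deep into
 * the bitmap the allocation may go.
 */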
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

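/*
 * Allocate a tag for @data. The reserved pool is used if BLK_MQ_REQ_RESERVED
 * is set, otherwise the regular pool. Unless BLK_MQ_REQ_NOWAIT is set, the
 * caller sleeps on the bitmap waitqueue until a tag frees up, kicking the
 * hardware queue between attempts. The returned tag already includes the
 * reserved-tag offset.
 */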
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        drop_ctx = data->ctx == NULL;
        do {
                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                prepare_to_wait_exclusive(&ws->wait, &wait,
                                          TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                finish_wait(&ws->wait, &wait);
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        finish_wait(&ws->wait, &wait);

found_tag:
        return tag + tag_offset;
}

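/*
 * Return a tag to the pool it was allocated from. The queue-wide tag value
 * is converted back to a pool-local bit before it is cleared.
 */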
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

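/*
 * Per-bit callback for bt_for_each(): map the set bit back to a request on
 * this hardware queue and hand it to the caller's iterator function.
 */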
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq)
                iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                                     busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

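/*
 * Call @fn for every request in @tagset that currently has a tag assigned,
 * across all hardware queues; reserved tags are walked before the regular
 * pool. Drivers commonly use this from error handling paths to cancel or
 * complete outstanding requests. A minimal, hypothetical callback might look
 * like this (names are for illustration only):
 *
 *	static void my_cancel_rq(struct request *rq, void *data, bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_rq, NULL);
 */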
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

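/*
 * Apply @fn to every pre-allocated (static) request in @set, across all
 * hardware queues. Iteration stops at the first non-zero return value from
 * @fn, which is then passed back to the caller.
 */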
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
                       int (fn)(void *, struct request *))
{
        int i, j, ret = 0;

        if (WARN_ON_ONCE(!fn))
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                if (!tags)
                        continue;

                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->static_rqs[j])
                                continue;

                        ret = fn(data, tags->static_rqs[j]);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);

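/*
 * Call @fn for every request on @q that currently has a tag assigned.
 * Hardware queues without any mapped software queues are skipped.
 */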
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

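/*
 * Set up the regular and reserved sbitmap queues for @tags. On failure,
 * anything allocated so far (including @tags itself) is freed and NULL is
 * returned.
 */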
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

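/*
 * Allocate a blk_mq_tags structure with @total_tags entries, the first
 * @reserved_tags of which form the reserved pool, and initialize both
 * bitmaps on @node.
 */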
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

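/*
 * Adjust the (non-reserved) tag depth of a hardware queue. Shrinking, or
 * growing within the originally allocated size, just resizes the bitmap;
 * growing past it requires @can_grow and allocates a new request map before
 * freeing the old one.
 */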
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        tdepth -= tags->nr_reserved_tags;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);