/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;

	atomic_t active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;
};
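
/*
 * Roughly: bitmap_tags/breserved_tags track which normal/reserved tags are
 * free, rqs[] maps a tag number back to the request currently using it,
 * static_rqs[] holds the requests preallocated with the tag set, and
 * page_list is the memory backing those preallocations.
 */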

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

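/*
 * blk_mq_get_tag() claims a free bit from bitmap_tags (or breserved_tags for
 * reserved allocations), returning BLK_MQ_NO_TAG if nothing is available and
 * the caller cannot wait; blk_mq_put_tag() releases the tag again.
 */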
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

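/*
 * Pick the wait queue a tag waiter should sleep on: sbq_wait_ptr() rotates
 * through the sbitmap_queue's wait states via hctx->wait_index so waiters are
 * spread out; without an hctx the first wait state is used.
 */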
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

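/*
 * Mark this hctx as an active or idle user of a shared tag set; the underlying
 * __blk_mq_tag_busy()/__blk_mq_tag_idle() helpers adjust tags->active_queues,
 * which feeds the fair-share check in hctx_may_queue() below.
 */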
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
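
/*
 * Illustrative example: with a shared bitmap depth of 128 and five active
 * queues, each hctx may use up to max((128 + 5 - 1) / 5, 4U) = 26 tags;
 * the 4U floor only matters for very shallow tag sets.
 */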

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif