#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

enum {
	BT_WAIT_QUEUES	= 8,	/* wait queues per bitmap, to spread sleepers */
	BT_WAIT_BATCH	= 8,	/* waiters woken per wakeup batch */
};

/*
 * One wait queue plus its wakeup counter, each instance padded to its
 * own cacheline to avoid false sharing between sleeping allocators.
 */
struct bt_wait_state {
	atomic_t wait_cnt;
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/* Resolve a tag to its word index and bit offset within that word. */
#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))
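
/*
 * Worked example (illustrative): on a 64-bit build bits_per_word is
 * typically 6, i.e. 64 tags per word, so tag 70 maps to word
 * TAG_TO_INDEX == 70 >> 6 == 1, bit TAG_TO_BIT == 70 & 63 == 6.
 * Shallow tag maps may use a smaller bits_per_word so the tags spread
 * across more cachelines.
 */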

struct blk_mq_bitmap_tags {
	unsigned int depth;		/* number of tags in this map */
	unsigned int wake_cnt;		/* wakeup batch threshold */
	unsigned int bits_per_word;	/* log2(tags per bitmap word) */

	unsigned int map_nr;		/* number of words in @map */
	struct blk_align_bitmap *map;	/* cacheline-aligned bitmap words */

	atomic_t wake_index;		/* next wait queue to wake */
	struct bt_wait_state *bs;	/* BT_WAIT_QUEUES wait queues */
};
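
/*
 * Sketch of how the two-level map is expected to be sized by the
 * allocation side (hedged; values are illustrative, not normative):
 *
 *	unsigned int tags_per_word = 1U << bt->bits_per_word;
 *	bt->map_nr = ALIGN(depth, tags_per_word) / tags_per_word;
 *
 * Each blk_align_bitmap then covers tags_per_word tags, with the last
 * word only partially used when depth isn't a multiple of it.
 */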

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total tags, reserved included */
	unsigned int nr_reserved_tags;	/* set aside for reserved allocations */

	atomic_t active_queues;		/* queues currently using this tag set */

	struct blk_mq_bitmap_tags bitmap_tags;		/* normal tags */
	struct blk_mq_bitmap_tags breserved_tags;	/* reserved tags */

	struct request **rqs;		/* tag -> request lookup table */
	struct list_head page_list;	/* pages backing the requests */

	int alloc_policy;		/* FIFO vs round-robin tag allocation */
	cpumask_var_t cpumask;
};
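
/*
 * A tag doubles as an index into ->rqs, so resolving a completed tag
 * back to its request is a plain array lookup. Minimal sketch of what
 * callers such as blk_mq_tag_to_rq() do (the bounds check here is
 * added for illustration):
 *
 *	static inline struct request *tag_to_rq(struct blk_mq_tags *tags,
 *						unsigned int tag)
 *	{
 *		return tag < tags->nr_tags ? tags->rqs[tag] : NULL;
 *	}
 */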

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
			   unsigned int *last_tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags,
				     unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);

enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MAX	= 64,
};

enum {
	BLK_MQ_TAG_FAIL		= -1U,
	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,
};
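
/*
 * Typical allocate/use/release cycle, sketched under the assumption
 * that the caller has filled in a struct blk_mq_alloc_data for its
 * hctx (simplified; the real callers live in blk-mq.c):
 *
 *	unsigned int tag = blk_mq_get_tag(&data);
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return NULL;			// no tag available
 *	...					// issue the request
 *	blk_mq_put_tag(hctx, tag, &last_tag);	// also updates the hint
 */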

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

/*
 * Account the hctx as an active user of a shared tag set; a no-op for
 * unshared tag sets.
 */
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
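
/*
 * Hedged usage sketch: on a shared tag set, the allocation path marks
 * the hctx as an active user and the idle path drops it again, so the
 * tag depth can be divided fairly among active queues:
 *
 *	blk_mq_tag_busy(data->hctx);	// before trying to get a tag
 *	...
 *	blk_mq_tag_idle(hctx);		// once the hctx has gone idle
 */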

/*
 * This helper should only be used by a flush request to share its tag
 * with the request it was cloned from; the two requests must never be
 * in flight at the same time. The caller has to make sure the tag
 * can't be freed.
 */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
		unsigned int tag, struct request *rq)
{
	hctx->tags->rqs[tag] = rq;
}
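
/*
 * Hedged sketch of the intended caller (the flush machinery): the
 * flush request borrows the data request's tag, then points the tag
 * back at itself so completion resolves to the flush request:
 *
 *	flush_rq->tag = first_rq->tag;
 *	blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 */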

#endif /* INT_BLK_MQ_TAG_H */