/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate. The lower two bits carry the MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
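
/*
 * Illustrative sketch, not part of the original header: gstate packs the
 * generation into the upper bits and the state into the lower two. A value
 * of 0x5 (0b101) therefore decodes as generation 1 in state MQ_RQ_IN_FLIGHT,
 * and bumping the generation is a plain addition:
 *
 *	u64 gstate = MQ_RQ_GEN_INC | MQ_RQ_IN_FLIGHT;	// gen 1, in flight
 *	gstate += MQ_RQ_GEN_INC;			// gen 2, state kept
 */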

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
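
/*
 * Illustrative sketch, not part of the original header: the allocation and
 * free helpers above pair up per hardware queue, roughly:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0)
 *		blk_mq_free_rq_map(tags);	// undo the map on failure
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);	// teardown: rqs first,
 *	blk_mq_free_rq_map(tags);		// then the map itself
 */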

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
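
/*
 * Illustrative sketch, not part of the original header: q->mq_map[] holds a
 * hardware queue index per possible CPU, so looking up the hctx that will
 * service a submission from the current CPU is two array dereferences:
 *
 *	struct blk_mq_hw_ctx *hctx =
 *		blk_mq_map_queue(q, raw_smp_processor_id());
 */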

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state. The caller is responsible for ensuring that
 * there are no other updaters. A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
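
/*
 * Illustrative sketch, not part of the original header: a request normally
 * walks IDLE -> IN_FLIGHT -> COMPLETE -> IDLE, and only the first step
 * bumps the generation:
 *
 *	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);	// generation++, state set
 *	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);	// state only
 *	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);		// ready for reuse
 *
 * The generation lets the timeout code tell a still-pending request apart
 * from one that completed and was recycled in the meantime.
 */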

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
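
/*
 * Illustrative sketch, not part of the original header: blk_mq_get_ctx()
 * disables preemption via get_cpu(), so every caller must pair it with
 * blk_mq_put_ctx():
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	// ... use ctx; preemption is disabled in this window ...
 *	blk_mq_put_ctx(ctx);
 */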

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
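
/*
 * Illustrative sketch, not part of the original header: allocation paths
 * fill in the input fields and leave ctx/hctx for the allocator to set:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q	= q,
 *		.flags	= BLK_MQ_REQ_NOWAIT,	// don't sleep for a tag
 *	};
 */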

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
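
/*
 * Illustrative sketch, not part of the original header: dispatch paths take
 * a budget before a driver tag and must return it if the request never
 * reaches the driver:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		return false;			// device busy, try later
 *	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 *		blk_mq_put_dispatch_budget(hctx);	// undo the budget
 *		return false;
 *	}
 *	// ... hand rq to ->queue_rq() ...
 */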

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif