blk-mq: save queue mapping result into ctx directly
[linux-block.git] / block / blk-mq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t lock;
                struct list_head rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int cpu;
        unsigned short index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long rq_dispatched[2];
        unsigned long rq_merged;

        /* incremented at completion time */
        unsigned long ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue *queue;
        struct blk_mq_ctxs *ctxs;
        struct kobject kobj;
} ____cacheline_aligned_in_smp;
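
/*
 * Note: hctxs[] above caches the software-queue -> hardware-queue mapping
 * per hctx type (the "queue mapping result" saved into the ctx, per the
 * commit subject), so hot paths can index the array directly instead of
 * re-deriving the mapping from q->tag_set->map[] on every request.
 */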

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq,
                                       blk_qc_t *cookie,
                                       bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
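
/*
 * Illustrative use (a sketch, not a stable contract): resolve the hardware
 * queue servicing a given type for the current CPU, assuming the tag_set
 * actually provides a map for @type:
 *
 *      hctx = blk_mq_map_queue_type(q, HCTX_TYPE_READ,
 *                                   raw_smp_processor_id());
 *
 * blk_mq_map_queue() below encodes the fallback logic for types a queue
 * does not provide.
 */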

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        if ((flags & REQ_HIPRI) &&
            q->tag_set->nr_maps > HCTX_TYPE_POLL &&
            q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
            test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                type = HCTX_TYPE_POLL;
        else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
                 q->tag_set->nr_maps > HCTX_TYPE_READ &&
                 q->tag_set->map[HCTX_TYPE_READ].nr_queues)
                type = HCTX_TYPE_READ;

        return ctx->hctxs[type];
}
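
/*
 * Illustrative submission-path sequence (a sketch): look up the software
 * queue first, then let the cached per-type mapping pick the hardware
 * queue:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *      ...
 *      blk_mq_put_ctx(ctx);
 *
 * Polled (REQ_HIPRI) requests prefer the poll map and reads prefer the
 * read map when those maps exist; everything else lands on
 * HCTX_TYPE_DEFAULT.
 */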

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
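
/*
 * Note: blk_mq_get_ctx() pins the caller to a CPU via get_cpu(), which
 * disables preemption, so every call must be paired with blk_mq_put_ctx()
 * once the software queue is no longer needed (see the sequence sketched
 * after blk_mq_map_queue() above).
 */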

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}
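
/*
 * Requests allocated with BLK_MQ_REQ_INTERNAL draw from the scheduler tag
 * space (sched_tags) rather than the driver tag space, so callers of this
 * helper transparently operate on whichever tag set the request came from.
 */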

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}
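
/*
 * Illustrative dispatch pattern (a sketch; peek_next_request() is a
 * hypothetical placeholder): a budget that was acquired must be released
 * if no request ends up being issued:
 *
 *      if (!blk_mq_get_dispatch_budget(hctx))
 *              return false;
 *      rq = peek_next_request(hctx);
 *      if (!rq) {
 *              blk_mq_put_dispatch_budget(hctx);
 *              return false;
 *      }
 */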

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
                                              struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
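
/*
 * In the helpers above a tag value of -1 means "none": rq->tag == -1 when
 * no driver tag is currently held, and rq->internal_tag == -1 when no I/O
 * scheduler is attached, in which case the driver tag lives for the whole
 * request lifetime and is not released early here.
 */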

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}
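
/*
 * Note: blk_mq_clear_mq_map() points every possible CPU at hardware queue
 * 0, typically so that a queue-map rebuild starts from a clean slate
 * instead of inheriting stale entries from a previous mapping.
 */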

#endif