/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		type;
	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	struct list_head	hctx_list;

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */

	HCTX_MAX_TYPES,
};

struct blk_mq_tag_set {
	/*
	 * map[] holds ctx -> hctx mappings, one map exists for each type
	 * that the driver wishes to support. There are no restrictions
	 * on maps being of the same size, and it's perfectly legal to
	 * share maps between types.
	 */
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * If a driver uses bd->last to judge when to submit requests to
	 * hardware, it must define this function. In case of errors that
	 * make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	commit_rqs_fn		*commit_rqs;

	/*
	 * Reserve budget before queueing a request; once .queue_rq has
	 * run, it is the driver's responsibility to release the reserved
	 * budget. The failure case of .get_budget must also be handled to
	 * avoid I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * If set, returns whether or not this queue currently is busy
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
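
/*
 * Illustrative sketch ("my_*" names are hypothetical, and a per-request PDU
 * is assumed to have been reserved via blk_mq_tag_set.cmd_size): the minimal
 * shape of a .queue_rq implementation and its ops table. Returning
 * BLK_STS_RESOURCE asks the core to re-dispatch the request later instead
 * of failing it.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		blk_mq_start_request(bd->rq);
 *		if (!my_hw_submit(hctx->driver_data, cmd, bd->last))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */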

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
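
/*
 * Worked example: with BLK_MQ_F_ALLOC_POLICY_START_BIT == 8 and a single
 * policy bit, and assuming the BLK_TAG_ALLOC_* values from <linux/blkdev.h>,
 * BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR) evaluates to 1 << 8, and
 * BLK_MQ_FLAG_TO_ALLOC_POLICY(1 << 8) recovers BLK_TAG_ALLOC_RR.
 */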

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
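
/*
 * Illustrative sketch of the usual tag set / queue lifecycle (hypothetical
 * "my_*" names, error unwinding abbreviated):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 *
 * blk_mq_init_sq_queue() wraps the same two steps for the common
 * single-queue case.
 */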

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
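
/*
 * Illustrative sketch: allocating and releasing a passthrough request
 * without blocking for a free tag.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */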
320ae51f | 279 | |
205fb5f5 BVA |
280 | enum { |
281 | BLK_MQ_UNIQUE_TAG_BITS = 16, | |
282 | BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, | |
283 | }; | |
284 | ||
285 | u32 blk_mq_unique_tag(struct request *rq); | |
286 | ||
287 | static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) | |
288 | { | |
289 | return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; | |
290 | } | |
291 | ||
292 | static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | |
293 | { | |
294 | return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; | |
295 | } | |
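
/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper
 * BLK_MQ_UNIQUE_TAG_BITS bits and the per-queue tag into the lower bits,
 * so a request with tag 5 on hardware queue 2 yields (2 << 16) | 5; the
 * two helpers above recover 2 and 5 respectively.
 */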

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
void blk_mq_complete_request_sync(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
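
/*
 * Illustrative usage: freezing drains and blocks all I/O so queue-wide
 * state can be updated safely, whereas quiescing only guarantees that no
 * .queue_rq call is in flight.
 *
 *	blk_mq_freeze_queue(q);
 *	... update state that must not race with request dispatch ...
 *	blk_mq_unfreeze_queue(q);
 */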

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
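
/*
 * Example: with cmd_size set to sizeof(struct my_cmd) (a hypothetical
 * driver PDU), the two helpers are inverses of each other:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */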

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
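
/*
 * Illustrative usage: walking every hardware queue of a request queue.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u: node %u\n", hctx->queue_num,
 *			hctx->numa_node);
 */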

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

#endif