/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		type;
	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;
	unsigned int		nr_expired;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

enum {
	HCTX_MAX_TYPES = 3,
};

struct blk_mq_tag_set {
	/*
	 * map[] holds ctx -> hctx mappings, one map exists for each type
	 * that the driver wishes to support. There are no restrictions
	 * on maps being of the same size, and it's perfectly legal to
	 * share maps between types.
	 */
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

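/*
 * Illustrative sketch (not part of this header): a driver typically fills
 * in a blk_mq_tag_set and hands it to blk_mq_alloc_tag_set() and
 * blk_mq_init_queue(), both declared below. The ops table "my_mq_ops" and
 * the PDU type "struct my_cmd" are hypothetical driver-side names.
 *
 *	static struct blk_mq_tag_set my_tag_set;
 *
 *	static int my_driver_create_queue(struct request_queue **q)
 *	{
 *		int ret;
 *
 *		my_tag_set.ops = &my_mq_ops;
 *		my_tag_set.nr_hw_queues = 1;
 *		my_tag_set.queue_depth = 64;
 *		my_tag_set.numa_node = NUMA_NO_NODE;
 *		my_tag_set.cmd_size = sizeof(struct my_cmd);
 *		my_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *		ret = blk_mq_alloc_tag_set(&my_tag_set);
 *		if (ret)
 *			return ret;
 *
 *		*q = blk_mq_init_queue(&my_tag_set);
 *		if (IS_ERR(*q)) {
 *			blk_mq_free_tag_set(&my_tag_set);
 *			return PTR_ERR(*q);
 *		}
 *		return 0;
 *	}
 */
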
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
/* takes rq->cmd_flags as input, returns a hardware type index */
typedef int (rq_flags_to_type_fn)(struct request_queue *, unsigned int);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);


struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * If a driver uses bd->last to judge when to submit requests to
	 * hardware, it must define this function. In case of errors that
	 * make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	commit_rqs_fn		*commit_rqs;

	/*
	 * Return a queue map type for the given request/bio flags
	 */
	rq_flags_to_type_fn	*rq_flags_to_type;

	/*
	 * Reserve budget before queueing a request; once .queue_rq is
	 * run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also
	 * be handled to avoid I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting
	 * up a flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * If set, returns whether or not this queue currently is busy
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

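/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * the minimal contract of .queue_rq is to call blk_mq_start_request()
 * before touching the hardware and to complete the request later; here
 * completion is done synchronously via blk_mq_end_request() for brevity.
 * "my_hw_submit" is a hypothetical driver helper; a driver that batches
 * on bd->last would also supply .commit_rqs as described above.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *
 *		if (my_hw_submit(hctx->driver_data, rq, bd->last))
 *			return BLK_STS_RESOURCE;	// re-run later
 *
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */
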
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

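/*
 * Worked example of the macro arithmetic above: with
 * BLK_MQ_F_ALLOC_POLICY_START_BIT == 8 and a single policy bit, the
 * policy mask is (1 << 1) - 1 == 1, so a policy value of 1 (round-robin
 * tag allocation, assuming the BLK_TAG_ALLOC_* values from
 * <linux/blkdev.h>) is stored in bit 8 of the flags:
 *
 *	BLK_ALLOC_POLICY_TO_MQ_FLAG(1)     == (1 & 1) << 8 == 0x100
 *	BLK_MQ_FLAG_TO_ALLOC_POLICY(0x100) == (0x100 >> 8) & 1 == 1
 *
 * A driver can therefore OR the encoded policy into tag_set->flags
 * alongside the other BLK_MQ_F_* bits.
 */
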
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

bool blk_mq_queue_busy(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

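/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * allocating a passthrough request without blocking, failing fast when
 * the tag space is exhausted. blk_mq_alloc_request() returns an ERR_PTR
 * on failure when BLK_MQ_REQ_NOWAIT is set.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */
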
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

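/*
 * The unique tag packs the hardware queue index into the upper 16 bits
 * and the per-queue tag into the lower 16 bits, so the two helpers above
 * invert blk_mq_unique_tag(). A short round-trip sketch:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// rq's hctx index
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// rq's tag in that hctx
 */
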

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

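/*
 * Illustrative sketch (hypothetical caller): the freeze/unfreeze pair
 * above is typically used to drain all in-flight requests around a
 * configuration change. blk_mq_freeze_queue() blocks until nothing is
 * in flight; blk_mq_unfreeze_queue() lets new I/O proceed.
 *
 *	blk_mq_freeze_queue(q);
 *	my_reconfigure_hw(dev);		// hypothetical driver helper
 *	blk_mq_unfreeze_queue(q);
 */
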
int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

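/*
 * Example of the PDU layout described above (assuming cmd_size was set
 * to sizeof(struct my_cmd) in the tag set, as in the earlier sketch):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	// data after rq
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 *
 * Because the PDU is allocated immediately after the request, both
 * conversions are plain pointer arithmetic with no extra lookup.
 */
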
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
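
/*
 * Usage sketch for the iterators above (hypothetical debugging helper):
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u: %u ctxs\n", hctx->queue_num, hctx->nr_ctx);
 */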

#endif