Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
320ae51f JA |
2 | #ifndef BLK_MQ_H |
3 | #define BLK_MQ_H | |
4 | ||
5 | #include <linux/blkdev.h> | |
88459642 | 6 | #include <linux/sbitmap.h> |
fb01a293 | 7 | #include <linux/lockdep.h> |
24b83deb | 8 | #include <linux/scatterlist.h> |
e028f167 | 9 | #include <linux/prefetch.h> |
80bd4a7a | 10 | #include <linux/srcu.h> |
44981351 | 11 | #include <linux/rw_hint.h> |
320ae51f JA |
12 | |
13 | struct blk_mq_tags; | |
f70ced09 | 14 | struct blk_flush_queue; |
320ae51f | 15 | |
24b83deb | 16 | #define BLKDEV_MIN_RQ 4 |
d2a27964 | 17 | #define BLKDEV_DEFAULT_RQ 128 |
24b83deb | 18 | |
de671d61 JA |
19 | enum rq_end_io_ret { |
20 | RQ_END_IO_NONE, | |
21 | RQ_END_IO_FREE, | |
22 | }; | |
23 | ||
24 | typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); | |
24b83deb CH |
25 | |
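The typedef above is the signature used for per-request completion callbacks (installed via rq->end_io before submission). A minimal, hedged sketch of such a callback; struct example_done is a hypothetical helper carried in rq->end_io_data:

	struct example_done {			/* hypothetical helper type */
		struct completion waiter;
		blk_status_t status;
	};

	static enum rq_end_io_ret example_end_io(struct request *rq,
						 blk_status_t status)
	{
		struct example_done *d = rq->end_io_data;

		d->status = status;
		complete(&d->waiter);
		/* RQ_END_IO_FREE would ask the block layer to free rq instead */
		return RQ_END_IO_NONE;
	}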
26 | /* request flags */ |
27 | |
28 | typedef __u32 __bitwise req_flags_t; |
29 | ||
8a47e33f | 30 | /* Keep rqf_name[] in sync with the definitions below */ |
5f89154e JG |
31 | enum { |
32 | /* drive already may have started this one */ | |
33 | __RQF_STARTED, | |
34 | /* request for flush sequence */ | |
35 | __RQF_FLUSH_SEQ, | |
36 | /* merge of different types, fail separately */ | |
37 | __RQF_MIXED_MERGE, | |
38 | /* don't call prep for this one */ | |
39 | __RQF_DONTPREP, | |
40 | /* use hctx->sched_tags */ | |
41 | __RQF_SCHED_TAGS, | |
42 | /* use an I/O scheduler for this request */ | |
43 | __RQF_USE_SCHED, | |
44 | /* vaguely specified driver internal error. Ignored by block layer */ | |
45 | __RQF_FAILED, | |
46 | /* don't warn about errors */ | |
47 | __RQF_QUIET, | |
48 | /* account into disk and partition IO statistics */ | |
49 | __RQF_IO_STAT, | |
50 | /* runtime pm request */ | |
51 | __RQF_PM, | |
52 | /* on IO scheduler merge hash */ | |
53 | __RQF_HASHED, | |
54 | /* track IO completion time */ | |
55 | __RQF_STATS, | |
56 | /* Look at ->special_vec for the actual data payload instead of the | |
57 | bio chain. */ | |
58 | __RQF_SPECIAL_PAYLOAD, | |
59 | /* request completion needs to be signaled to zone write plugging. */ | |
60 | __RQF_ZONE_WRITE_PLUGGING, | |
61 | /* ->timeout has been called, don't expire again */ | |
62 | __RQF_TIMED_OUT, | |
63 | __RQF_RESV, | |
64 | __RQF_BITS | |
65 | }; | |
66 | ||
67 | #define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED)) | |
68 | #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ)) | |
69 | #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE)) | |
70 | #define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP)) | |
71 | #define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS)) | |
72 | #define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED)) | |
73 | #define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED)) | |
74 | #define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET)) | |
75 | #define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT)) | |
76 | #define RQF_PM ((__force req_flags_t)(1 << __RQF_PM)) | |
77 | #define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED)) | |
78 | #define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS)) | |
79 | #define RQF_SPECIAL_PAYLOAD \ | |
80 | ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD)) | |
81 | #define RQF_ZONE_WRITE_PLUGGING \ | |
82 | ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING)) | |
83 | #define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT)) | |
84 | #define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV)) | |
24b83deb CH |
85 | |
86 | /* flags that prevent us from merging requests: */ | |
87 | #define RQF_NOMERGE_FLAGS \ | |
9a67aa52 | 88 | (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) |
24b83deb CH |
89 | |
90 | enum mq_rq_state { | |
91 | MQ_RQ_IDLE = 0, | |
92 | MQ_RQ_IN_FLIGHT = 1, | |
93 | MQ_RQ_COMPLETE = 2, | |
94 | }; | |
95 | ||
96 | /* | |
97 | * Try to put the fields that are referenced together in the same cacheline. | |
98 | * | |
99 | * If you modify this structure, make sure to update blk_rq_init() and | |
100 | * especially blk_mq_rq_ctx_init() to take care of the added fields. | |
101 | */ | |
102 | struct request { | |
103 | struct request_queue *q; | |
104 | struct blk_mq_ctx *mq_ctx; | |
105 | struct blk_mq_hw_ctx *mq_hctx; | |
106 | ||
16458cf3 | 107 | blk_opf_t cmd_flags; /* op and common flags */ |
24b83deb CH |
108 | req_flags_t rq_flags; |
109 | ||
110 | int tag; | |
111 | int internal_tag; | |
112 | ||
b6087629 JA |
113 | unsigned int timeout; |
114 | ||
24b83deb CH |
115 | /* the following two fields are internal, NEVER access directly */ |
116 | unsigned int __data_len; /* total data len */ | |
117 | sector_t __sector; /* sector cursor */ | |
118 | ||
119 | struct bio *bio; | |
120 | struct bio *biotail; | |
121 | ||
47c122e3 JA |
122 | union { |
123 | struct list_head queuelist; | |
124 | struct request *rq_next; | |
125 | }; | |
24b83deb | 126 | |
24b83deb CH |
127 | struct block_device *part; |
128 | #ifdef CONFIG_BLK_RQ_ALLOC_TIME | |
129 | /* Time that the first bio started allocating this request. */ | |
130 | u64 alloc_time_ns; | |
131 | #endif | |
132 | /* Time that this request was allocated for this IO. */ | |
133 | u64 start_time_ns; | |
134 | /* Time that I/O was submitted to the device. */ | |
135 | u64 io_start_time_ns; | |
136 | ||
137 | #ifdef CONFIG_BLK_WBT | |
138 | unsigned short wbt_flags; | |
139 | #endif | |
140 | /* |
141 | * rq sectors used for blk stats. It has the same value as |
142 | * blk_rq_sectors(rq), except that it is never zeroed by |
143 | * completion. |
144 | */ |
145 | unsigned short stats_sectors; | |
146 | ||
147 | /* | |
148 | * Number of scatter-gather DMA addr+len pairs after | |
149 | * physical address coalescing is performed. | |
150 | */ | |
151 | unsigned short nr_phys_segments; | |
152 | ||
153 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
154 | unsigned short nr_integrity_segments; | |
155 | #endif | |
156 | ||
157 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION | |
158 | struct bio_crypt_ctx *crypt_ctx; | |
cb77cb5a | 159 | struct blk_crypto_keyslot *crypt_keyslot; |
24b83deb CH |
160 | #endif |
161 | ||
44981351 | 162 | enum rw_hint write_hint; |
24b83deb CH |
163 | unsigned short ioprio; |
164 | ||
165 | enum mq_rq_state state; | |
0a467d0f | 166 | atomic_t ref; |
24b83deb | 167 | |
24b83deb CH |
168 | unsigned long deadline; |
169 | ||
b6087629 JA |
170 | /* |
171 | * The hash is used inside the scheduler, and killed once the | |
172 | * request reaches the dispatch list. The ipi_list is only used | |
173 | * to queue the request for softirq completion, which is long | |
174 | * after the request has been unhashed (and even removed from | |
175 | * the dispatch list). | |
176 | */ | |
177 | union { | |
178 | struct hlist_node hash; /* merge hash */ | |
179 | struct llist_node ipi_list; | |
180 | }; | |
181 | ||
182 | /* | |
183 | * The rb_node is only used inside the io scheduler, requests | |
dc8cbb65 JA |
184 | * are pruned when moved to the dispatch queue. special_vec must |
185 | * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be | |
186 | * inserted into an IO scheduler. |
b6087629 JA |
187 | */ |
188 | union { | |
189 | struct rb_node rb_node; /* sort/lookup */ | |
190 | struct bio_vec special_vec; | |
b6087629 JA |
191 | }; |
192 | ||
b6087629 JA |
193 | /* |
194 | * Three pointers are available for the IO schedulers; if they need |
be4c4278 | 195 | * more, they have to allocate it dynamically. |
b6087629 | 196 | */ |
be4c4278 BVA |
197 | struct { |
198 | struct io_cq *icq; | |
199 | void *priv[2]; | |
200 | } elv; | |
201 | ||
202 | struct { | |
203 | unsigned int seq; | |
be4c4278 BVA |
204 | rq_end_io_fn *saved_end_io; |
205 | } flush; | |
b6087629 | 206 | |
660e802c | 207 | u64 fifo_time; |
24b83deb CH |
208 | |
209 | /* | |
210 | * completion callback. | |
211 | */ | |
212 | rq_end_io_fn *end_io; | |
213 | void *end_io_data; | |
214 | }; | |
215 | ||
2d9b02be BVA |
216 | static inline enum req_op req_op(const struct request *req) |
217 | { | |
218 | return req->cmd_flags & REQ_OP_MASK; | |
219 | } | |
24b83deb CH |
220 | |
221 | static inline bool blk_rq_is_passthrough(struct request *rq) | |
222 | { | |
712fd23a | 223 | return blk_op_is_passthrough(rq->cmd_flags); |
24b83deb CH |
224 | } |
225 | ||
226 | static inline unsigned short req_get_ioprio(struct request *req) | |
227 | { | |
228 | return req->ioprio; | |
229 | } | |
230 | ||
231 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) | |
232 | ||
233 | #define rq_dma_dir(rq) \ | |
234 | (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) | |
235 | ||
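As a hedged illustration of how these accessors are typically used, a driver's dispatch path might switch on req_op() and pass rq_dma_dir() to its DMA mapping; example_rw() and example_flush() are hypothetical:

	static blk_status_t example_dispatch(struct request *rq)
	{
		switch (req_op(rq)) {
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			/* rq_data_dir() gives READ/WRITE, rq_dma_dir() the DMA side */
			return example_rw(rq, rq_dma_dir(rq));
		case REQ_OP_FLUSH:
			return example_flush(rq);
		default:
			return BLK_STS_NOTSUPP;
		}
	}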
edce22e1 KB |
236 | #define rq_list_add(listptr, rq) do { \ |
237 | (rq)->rq_next = *(listptr); \ | |
238 | *(listptr) = rq; \ | |
239 | } while (0) | |
240 | ||
34e0a279 JK |
241 | #define rq_list_add_tail(lastpptr, rq) do { \ |
242 | (rq)->rq_next = NULL; \ | |
243 | **(lastpptr) = rq; \ | |
244 | *(lastpptr) = &rq->rq_next; \ | |
245 | } while (0) | |
246 | ||
edce22e1 KB |
247 | #define rq_list_pop(listptr) \ |
248 | ({ \ | |
249 | struct request *__req = NULL; \ | |
250 | if ((listptr) && *(listptr)) { \ | |
251 | __req = *(listptr); \ | |
252 | *(listptr) = __req->rq_next; \ | |
253 | } \ | |
254 | __req; \ | |
255 | }) | |
256 | ||
257 | #define rq_list_peek(listptr) \ | |
258 | ({ \ | |
259 | struct request *__req = NULL; \ | |
260 | if ((listptr) && *(listptr)) \ | |
261 | __req = *(listptr); \ | |
262 | __req; \ | |
263 | }) | |
264 | ||
265 | #define rq_list_for_each(listptr, pos) \ | |
266 | for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) | |
267 | ||
3764fd05 KB |
268 | #define rq_list_for_each_safe(listptr, pos, nxt) \ |
269 | for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \ | |
270 | pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL) | |
271 | ||
edce22e1 KB |
272 | #define rq_list_next(rq) (rq)->rq_next |
273 | #define rq_list_empty(list) ((list) == (struct request *) NULL) | |
274 | ||
d2528be7 KB |
275 | /** |
276 | * rq_list_move() - move a struct request from one list to another | |
277 | * @src: The source list @rq is currently in | |
278 | * @dst: The destination list that @rq will be appended to | |
279 | * @rq: The request to move | |
280 | * @prev: The request preceding @rq in @src (NULL if @rq is the head) | |
281 | */ | |
292c33c9 | 282 | static inline void rq_list_move(struct request **src, struct request **dst, |
d2528be7 KB |
283 | struct request *rq, struct request *prev) |
284 | { | |
285 | if (prev) | |
286 | prev->rq_next = rq->rq_next; | |
287 | else | |
288 | *src = rq->rq_next; | |
289 | rq_list_add(dst, rq); | |
290 | } | |
291 | ||
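A short sketch of how these list helpers are meant to be used, for instance in a ->queue_rqs()-style path; example_hw_submit() is hypothetical and error handling is omitted:

	static void example_queue_rqs(struct request **rqlist)
	{
		struct request *rq;

		while ((rq = rq_list_pop(rqlist))) {
			/*
			 * rq is now unlinked from *rqlist; anything the driver
			 * cannot accept could be pushed back with rq_list_add().
			 */
			example_hw_submit(rq);
		}
	}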
b2bed51a BVA |
292 | /** |
293 | * enum blk_eh_timer_return - How the timeout handler should proceed | |
294 | * @BLK_EH_DONE: The block driver completed the command or will complete it at | |
295 | * a later time. | |
296 | * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the | |
297 | * request to complete. | |
298 | */ | |
24b83deb | 299 | enum blk_eh_timer_return { |
b2bed51a BVA |
300 | BLK_EH_DONE, |
301 | BLK_EH_RESET_TIMER, | |
24b83deb CH |
302 | }; |
303 | ||
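A minimal, hedged sketch of a ->timeout handler built on this enum; example_device_still_busy() is hypothetical, and a real driver would record an error status for its completion path before calling blk_mq_complete_request():

	static enum blk_eh_timer_return example_timeout(struct request *rq)
	{
		if (example_device_still_busy(rq))
			return BLK_EH_RESET_TIMER;	/* re-arm and keep waiting */

		/* The driver takes over and completes the request itself. */
		blk_mq_complete_request(rq);
		return BLK_EH_DONE;
	}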
26d3bdb5 JG |
304 | /* Keep alloc_policy_name[] in sync with the definitions below */ |
305 | enum { | |
306 | BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */ | |
307 | BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */ | |
308 | BLK_TAG_ALLOC_MAX | |
309 | }; | |
24b83deb | 310 | |
fe644072 | 311 | /** |
d386732b AA |
312 | * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware |
313 | * block device | |
fe644072 | 314 | */ |
320ae51f JA |
315 | struct blk_mq_hw_ctx { |
316 | struct { | |
d386732b | 317 | /** @lock: Protects the dispatch list. */ |
320ae51f | 318 | spinlock_t lock; |
d386732b AA |
319 | /** |
320 | * @dispatch: Used for requests that are ready to be | |
321 | * dispatched to the hardware but for some reason (e.g. lack of | |
322 | * resources) could not be sent to the hardware. As soon as the | |
323 | * driver can send new requests, requests in this list will |
324 | * be sent first for a fairer dispatch. | |
325 | */ | |
320ae51f | 326 | struct list_head dispatch; |
d386732b AA |
327 | /** |
328 | * @state: BLK_MQ_S_* flags. Defines the state of the hw | |
329 | * queue (active, scheduled to restart, stopped). | |
330 | */ | |
331 | unsigned long state; | |
320ae51f JA |
332 | } ____cacheline_aligned_in_smp; |
333 | ||
d386732b AA |
334 | /** |
335 | * @run_work: Used for scheduling a hardware queue run at a later time. | |
336 | */ | |
9f993737 | 337 | struct delayed_work run_work; |
d386732b | 338 | /** @cpumask: Map of available CPUs where this hctx can run. */ |
e4043dcf | 339 | cpumask_var_t cpumask; |
d386732b AA |
340 | /** |
341 | * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU | |
342 | * selection from @cpumask. | |
343 | */ | |
506e931f | 344 | int next_cpu; |
d386732b AA |
345 | /** |
346 | * @next_cpu_batch: Counter of how many works left in the batch before | |
347 | * changing to the next CPU. | |
348 | */ | |
506e931f | 349 | int next_cpu_batch; |
320ae51f | 350 | |
d386732b AA |
351 | /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */ |
352 | unsigned long flags; | |
320ae51f | 353 | |
d386732b AA |
354 | /** |
355 | * @sched_data: Pointer owned by the IO scheduler attached to a request | |
356 | * queue. It's up to the IO scheduler how to use this pointer. | |
357 | */ | |
bd166ef1 | 358 | void *sched_data; |
d386732b AA |
359 | /** |
360 | * @queue: Pointer to the request queue that owns this hardware context. | |
361 | */ | |
320ae51f | 362 | struct request_queue *queue; |
d386732b | 363 | /** @fq: Queue of requests that need to perform a flush operation. */ |
f70ced09 | 364 | struct blk_flush_queue *fq; |
320ae51f | 365 | |
d386732b AA |
366 | /** |
367 | * @driver_data: Pointer to data owned by the block driver that created | |
368 | * this hctx | |
369 | */ | |
320ae51f JA |
370 | void *driver_data; |
371 | ||
d386732b AA |
372 | /** |
373 | * @ctx_map: Bitmap for each software queue. If bit is on, there is a | |
374 | * pending request in that software queue. | |
375 | */ | |
88459642 | 376 | struct sbitmap ctx_map; |
1429d7c9 | 377 | |
d386732b AA |
378 | /** |
379 | * @dispatch_from: Software queue to be used when no scheduler was | |
380 | * selected. | |
381 | */ | |
b347689f | 382 | struct blk_mq_ctx *dispatch_from; |
d386732b AA |
383 | /** |
384 | * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to | |
385 | * decide if the hw_queue is busy using Exponential Weighted Moving | |
386 | * Average algorithm. | |
387 | */ | |
6e768717 | 388 | unsigned int dispatch_busy; |
b347689f | 389 | |
d386732b | 390 | /** @type: HCTX_TYPE_* flags. Type of hardware queue. */ |
f31967f0 | 391 | unsigned short type; |
d386732b | 392 | /** @nr_ctx: Number of software queues. */ |
f31967f0 | 393 | unsigned short nr_ctx; |
d386732b | 394 | /** @ctxs: Array of software queues. */ |
6e768717 | 395 | struct blk_mq_ctx **ctxs; |
4bb659b1 | 396 | |
d386732b | 397 | /** @dispatch_wait_lock: Lock for dispatch_wait queue. */ |
5815839b | 398 | spinlock_t dispatch_wait_lock; |
d386732b AA |
399 | /** |
400 | * @dispatch_wait: Waitqueue to put requests when there is no tag | |
401 | * available at the moment, to wait for another try in the future. | |
402 | */ | |
eb619fdb | 403 | wait_queue_entry_t dispatch_wait; |
d386732b AA |
404 | |
405 | /** | |
406 | * @wait_index: Index of next available dispatch_wait queue to insert | |
407 | * requests. | |
408 | */ | |
8537b120 | 409 | atomic_t wait_index; |
320ae51f | 410 | |
d386732b AA |
411 | /** |
412 | * @tags: Tags owned by the block driver. A tag at this set is only | |
413 | * assigned when a request is dispatched from a hardware queue. | |
414 | */ | |
320ae51f | 415 | struct blk_mq_tags *tags; |
d386732b AA |
416 | /** |
417 | * @sched_tags: Tags owned by I/O scheduler. If there is an I/O | |
418 | * scheduler associated with a request queue, a tag is assigned when | |
419 | * that request is allocated. Else, this member is not used. | |
420 | */ | |
bd166ef1 | 421 | struct blk_mq_tags *sched_tags; |
320ae51f | 422 | |
d386732b | 423 | /** @numa_node: NUMA node the storage adapter has been connected to. */ |
320ae51f | 424 | unsigned int numa_node; |
d386732b | 425 | /** @queue_num: Index of this hardware queue. */ |
17ded320 | 426 | unsigned int queue_num; |
320ae51f | 427 | |
d386732b AA |
428 | /** |
429 | * @nr_active: Number of active requests. Only used when a tag set is | |
430 | * shared across request queues. | |
431 | */ | |
0d2602ca JA |
432 | atomic_t nr_active; |
433 | ||
bf0beec0 ML |
434 | /** @cpuhp_online: List to store request if a CPU is going to die */ |
435 | struct hlist_node cpuhp_online; |
d386732b | 436 | /** @cpuhp_dead: List to store request if some CPU dies. */ |
9467f859 | 437 | struct hlist_node cpuhp_dead; |
d386732b | 438 | /** @kobj: Kernel object for sysfs. */ |
320ae51f | 439 | struct kobject kobj; |
05229bee | 440 | |
9c1051aa | 441 | #ifdef CONFIG_BLK_DEBUG_FS |
d386732b AA |
442 | /** |
443 | * @debugfs_dir: debugfs directory for this hardware queue. Named | |
444 | * as cpu<cpu_number>. | |
445 | */ | |
9c1051aa | 446 | struct dentry *debugfs_dir; |
d386732b | 447 | /** @sched_debugfs_dir: debugfs directory for the scheduler. */ |
d332ce09 | 448 | struct dentry *sched_debugfs_dir; |
9c1051aa | 449 | #endif |
07319678 | 450 | |
2dd209f0 BVA |
451 | /** |
452 | * @hctx_list: if this hctx is not in use, this is an entry in | |
453 | * q->unused_hctx_list. | |
454 | */ | |
2f8f1336 | 455 | struct list_head hctx_list; |
320ae51f JA |
456 | }; |
457 | ||
7a18312c | 458 | /** |
d386732b | 459 | * struct blk_mq_queue_map - Map software queues to hardware queues |
7a18312c BVA |
460 | * @mq_map: CPU ID to hardware queue index map. This is an array |
461 | * with nr_cpu_ids elements. Each element has a value in the range | |
462 | * [@queue_offset, @queue_offset + @nr_queues). | |
463 | * @nr_queues: Number of hardware queues to map CPU IDs onto. | |
464 | * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe | |
465 | * driver to map each hardware queue type (enum hctx_type) onto a distinct | |
466 | * set of hardware queues. | |
467 | */ | |
ed76e329 JA |
468 | struct blk_mq_queue_map { |
469 | unsigned int *mq_map; | |
470 | unsigned int nr_queues; | |
843477d4 | 471 | unsigned int queue_offset; |
ed76e329 JA |
472 | }; |
473 | ||
d386732b AA |
474 | /** |
475 | * enum hctx_type - Type of hardware queue | |
476 | * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for. | |
477 | * @HCTX_TYPE_READ: Just for READ I/O. | |
478 | * @HCTX_TYPE_POLL: Polled I/O of any kind. | |
479 | * @HCTX_MAX_TYPES: Number of types of hctx. | |
480 | */ | |
e20ba6e1 | 481 | enum hctx_type { |
d386732b AA |
482 | HCTX_TYPE_DEFAULT, |
483 | HCTX_TYPE_READ, | |
484 | HCTX_TYPE_POLL, | |
e20ba6e1 CH |
485 | |
486 | HCTX_MAX_TYPES, | |
ed76e329 JA |
487 | }; |
488 | ||
7a18312c BVA |
489 | /** |
490 | * struct blk_mq_tag_set - tag set that can be shared between request queues | |
d88cbbb3 | 491 | * @ops: Pointers to functions that implement block driver behavior. |
7a18312c BVA |
492 | * @map: One or more ctx -> hctx mappings. One map exists for each |
493 | * hardware queue type (enum hctx_type) that the driver wishes | |
494 | * to support. There are no restrictions on maps being of the | |
495 | * same size, and it's perfectly legal to share maps between | |
496 | * types. | |
497 | * @nr_maps: Number of elements in the @map array. A number in the range | |
498 | * [1, HCTX_MAX_TYPES]. | |
7a18312c BVA |
499 | * @nr_hw_queues: Number of hardware queues supported by the block driver that |
500 | * owns this data structure. | |
501 | * @queue_depth: Number of tags per hardware queue, reserved tags included. | |
502 | * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag | |
503 | * allocations. | |
504 | * @cmd_size: Number of additional bytes to allocate per request. The block | |
505 | * driver owns these additional bytes. | |
506 | * @numa_node: NUMA node the storage adapter has been connected to. | |
507 | * @timeout: Request processing timeout in jiffies. | |
508 | * @flags: Zero or more BLK_MQ_F_* flags. | |
509 | * @driver_data: Pointer to data owned by the block driver that created this | |
510 | * tag set. | |
511 | * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues | |
512 | * elements. | |
079a2e3e JG |
513 | * @shared_tags: |
514 | * Shared set of tags. Has @nr_hw_queues elements. If set, | |
515 | * shared by all @tags. | |
7a18312c BVA |
516 | * @tag_list_lock: Serializes tag_list accesses. |
517 | * @tag_list: List of the request queues that use this tag set. See also | |
518 | * request_queue.tag_set_list. | |
80bd4a7a CH |
519 | * @srcu: Use as lock when type of the request queue is blocking |
520 | * (BLK_MQ_F_BLOCKING). | |
7a18312c | 521 | */ |
24d2f903 | 522 | struct blk_mq_tag_set { |
d88cbbb3 | 523 | const struct blk_mq_ops *ops; |
ed76e329 | 524 | struct blk_mq_queue_map map[HCTX_MAX_TYPES]; |
7a18312c | 525 | unsigned int nr_maps; |
7a18312c BVA |
526 | unsigned int nr_hw_queues; |
527 | unsigned int queue_depth; | |
320ae51f | 528 | unsigned int reserved_tags; |
7a18312c | 529 | unsigned int cmd_size; |
320ae51f JA |
530 | int numa_node; |
531 | unsigned int timeout; | |
7a18312c | 532 | unsigned int flags; |
24d2f903 CH |
533 | void *driver_data; |
534 | ||
535 | struct blk_mq_tags **tags; | |
0d2602ca | 536 | |
079a2e3e | 537 | struct blk_mq_tags *shared_tags; |
e155b0c2 | 538 | |
0d2602ca JA |
539 | struct mutex tag_list_lock; |
540 | struct list_head tag_list; | |
80bd4a7a | 541 | struct srcu_struct *srcu; |
320ae51f JA |
542 | }; |
543 | ||
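To make the queue-mapping machinery above concrete, here is a hedged sketch of a driver ->map_queues() implementation that fills a single default map and lets the core spread CPUs over the hardware queues; it relies only on declarations in this header (blk_mq_map_queues() is declared further down):

	static void example_map_queues(struct blk_mq_tag_set *set)
	{
		struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

		qmap->nr_queues = set->nr_hw_queues;
		qmap->queue_offset = 0;
		blk_mq_map_queues(qmap);	/* default CPU -> hw queue spread */
	}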
d386732b AA |
544 | /** |
545 | * struct blk_mq_queue_data - Data about a request inserted in a queue | |
546 | * | |
547 | * @rq: Request pointer. | |
548 | * @last: If it is the last request in the queue. | |
549 | */ | |
74c45052 JA |
550 | struct blk_mq_queue_data { |
551 | struct request *rq; | |
74c45052 JA |
552 | bool last; |
553 | }; | |
554 | ||
2dd6532e | 555 | typedef bool (busy_tag_iter_fn)(struct request *, void *); |
05229bee | 556 | |
d386732b AA |
557 | /** |
558 | * struct blk_mq_ops - Callback functions that implements block driver | |
559 | * behaviour. | |
560 | */ | |
320ae51f | 561 | struct blk_mq_ops { |
d386732b AA |
562 | /** |
563 | * @queue_rq: Queue a new request from block IO. | |
320ae51f | 564 | */ |
0516c2f6 DW |
565 | blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, |
566 | const struct blk_mq_queue_data *); | |
320ae51f | 567 | |
d386732b AA |
568 | /** |
569 | * @commit_rqs: If a driver uses bd->last to judge when to submit | |
570 | * requests to hardware, it must define this function. In case of errors | |
571 | * that make us stop issuing further requests, this hook serves the | |
d666ba98 JA |
572 | * purpose of kicking the hardware (which the last request otherwise |
573 | * would have done). | |
574 | */ | |
0516c2f6 | 575 | void (*commit_rqs)(struct blk_mq_hw_ctx *); |
d666ba98 | 576 | |
3c67d44d JA |
577 | /** |
578 | * @queue_rqs: Queue a list of new requests. Driver is guaranteed | |
579 | * that each request belongs to the same queue. If the driver doesn't | |
580 | * empty the @rqlist completely, then the rest will be queued | |
581 | * individually by the block layer upon return. | |
582 | */ | |
583 | void (*queue_rqs)(struct request **rqlist); | |
584 | ||
d386732b AA |
585 | /** |
586 | * @get_budget: Reserve budget before queue request, once .queue_rq is | |
de148297 ML |
587 | * run, it is driver's responsibility to release the |
588 | * reserved budget. Also we have to handle failure case | |
589 | * of .get_budget for avoiding I/O deadlock. | |
590 | */ | |
2a5a24aa | 591 | int (*get_budget)(struct request_queue *); |
0516c2f6 | 592 | |
d386732b AA |
593 | /** |
594 | * @put_budget: Release the reserved budget. | |
595 | */ | |
2a5a24aa | 596 | void (*put_budget)(struct request_queue *, int); |
de148297 | 597 | |
85367040 ML |
598 | /** |
599 | * @set_rq_budget_token: store rq's budget token | |
d022d18c ML |
600 | */ |
601 | void (*set_rq_budget_token)(struct request *, int); | |
85367040 ML |
602 | /** |
603 | * @get_rq_budget_token: retrieve rq's budget token | |
d022d18c ML |
604 | */ |
605 | int (*get_rq_budget_token)(struct request *); | |
606 | ||
d386732b AA |
607 | /** |
608 | * @timeout: Called on request timeout. | |
320ae51f | 609 | */ |
9bdb4833 | 610 | enum blk_eh_timer_return (*timeout)(struct request *); |
320ae51f | 611 | |
d386732b AA |
612 | /** |
613 | * @poll: Called to poll for completion of a specific tag. | |
05229bee | 614 | */ |
5a72e899 | 615 | int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); |
05229bee | 616 | |
d386732b AA |
617 | /** |
618 | * @complete: Mark the request as complete. | |
619 | */ | |
0516c2f6 | 620 | void (*complete)(struct request *); |
30a91cb4 | 621 | |
d386732b AA |
622 | /** |
623 | * @init_hctx: Called when the block layer side of a hardware queue has | |
624 | * been set up, allowing the driver to allocate/init matching | |
625 | * structures. | |
320ae51f | 626 | */ |
0516c2f6 | 627 | int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); |
d386732b AA |
628 | /** |
629 | * @exit_hctx: Ditto for exit/teardown. | |
630 | */ | |
0516c2f6 | 631 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
e9b267d9 | 632 | |
d386732b AA |
633 | /** |
634 | * @init_request: Called for every command allocated by the block layer | |
635 | * to allow the driver to set up driver specific data. | |
f70ced09 ML |
636 | * |
637 | * A tag greater than or equal to queue_depth is for setting up the |
638 | * flush request. |
e9b267d9 | 639 | */ |
0516c2f6 DW |
640 | int (*init_request)(struct blk_mq_tag_set *set, struct request *, |
641 | unsigned int, unsigned int); | |
d386732b AA |
642 | /** |
643 | * @exit_request: Ditto for exit/teardown. | |
644 | */ | |
0516c2f6 DW |
645 | void (*exit_request)(struct blk_mq_tag_set *set, struct request *, |
646 | unsigned int); | |
d386732b | 647 | |
d386732b AA |
648 | /** |
649 | * @cleanup_rq: Called before freeing one request which isn't completed | |
650 | * yet, and usually for freeing the driver private data. | |
226b4fc7 | 651 | */ |
0516c2f6 | 652 | void (*cleanup_rq)(struct request *); |
226b4fc7 | 653 | |
d386732b AA |
654 | /** |
655 | * @busy: If set, returns whether or not this queue currently is busy. | |
9ba20527 | 656 | */ |
0516c2f6 | 657 | bool (*busy)(struct request_queue *); |
9ba20527 | 658 | |
d386732b AA |
659 | /** |
660 | * @map_queues: This allows drivers to specify their own queue mapping by |
661 | * overriding the setup-time function that builds the mq_map. | |
662 | */ | |
a4e1d0b7 | 663 | void (*map_queues)(struct blk_mq_tag_set *set); |
2836ee4b BVA |
664 | |
665 | #ifdef CONFIG_BLK_DEBUG_FS | |
d386732b AA |
666 | /** |
667 | * @show_rq: Used by the debugfs implementation to show driver-specific | |
2836ee4b BVA |
668 | * information about a request. |
669 | */ | |
670 | void (*show_rq)(struct seq_file *m, struct request *rq); | |
671 | #endif | |
320ae51f JA |
672 | }; |
673 | ||
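As a hedged sketch of how these callbacks fit together, a simple single-queue driver might provide little more than ->queue_rq plus completion and timeout handlers; example_hw_submit(), example_complete() and the example_timeout() sketched earlier are hypothetical:

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;

		blk_mq_start_request(rq);
		if (example_hw_submit(hctx->driver_data, rq))
			return BLK_STS_RESOURCE;	/* retried later by the core */
		return BLK_STS_OK;
	}

	static const struct blk_mq_ops example_mq_ops = {
		.queue_rq	= example_queue_rq,
		.complete	= example_complete,
		.timeout	= example_timeout,
	};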
226f0f6a | 674 | /* Keep hctx_flag_name[] in sync with the definitions below */ |
320ae51f | 675 | enum { |
320ae51f | 676 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
51db1c37 | 677 | BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, |
bf0beec0 ML |
678 | /* |
679 | * Set when this device requires underlying blk-mq device for | |
680 | * completing IO. |
681 | */ | |
682 | BLK_MQ_F_STACKING = 1 << 2, | |
32bc15af | 683 | BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, |
226f0f6a | 684 | BLK_MQ_F_BLOCKING = 1 << 4, |
90b71980 | 685 | /* Do not allow an I/O scheduler to be configured. */ |
226f0f6a JG |
686 | BLK_MQ_F_NO_SCHED = 1 << 5, |
687 | ||
90b71980 BVA |
688 | /* |
689 | * Select 'none' during queue registration in case of a single hwq | |
690 | * or shared hwqs instead of 'mq-deadline'. | |
691 | */ | |
226f0f6a JG |
692 | BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6, |
693 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, | |
24391c0d | 694 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, |
320ae51f | 695 | }; |
24391c0d SL |
696 | #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ |
697 | ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ | |
698 | ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) | |
699 | #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ | |
700 | ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ | |
701 | << BLK_MQ_F_ALLOC_POLICY_START_BIT) | |
320ae51f | 702 | |
793356d2 | 703 | #define BLK_MQ_MAX_DEPTH (10240) |
e155b0c2 JG |
704 | #define BLK_MQ_NO_HCTX_IDX (-1U) |
705 | ||
23827310 JG |
706 | enum { |
707 | /* Keep hctx_state_name[] in sync with the definitions below */ | |
708 | BLK_MQ_S_STOPPED, | |
709 | BLK_MQ_S_TAG_ACTIVE, | |
710 | BLK_MQ_S_SCHED_RESTART, | |
711 | /* hw queue is inactive after all its CPUs become offline */ | |
712 | BLK_MQ_S_INACTIVE, | |
713 | BLK_MQ_S_MAX | |
714 | }; | |
715 | ||
27e32cd2 CH |
716 | struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, |
717 | struct queue_limits *lim, void *queuedata, | |
4dcc4874 | 718 | struct lock_class_key *lkclass); |
27e32cd2 | 719 | #define blk_mq_alloc_disk(set, lim, queuedata) \ |
b461dfc4 CH |
720 | ({ \ |
721 | static struct lock_class_key __key; \ | |
b461dfc4 | 722 | \ |
27e32cd2 | 723 | __blk_mq_alloc_disk(set, lim, queuedata, &__key); \ |
b461dfc4 | 724 | }) |
6f8191fd CH |
725 | struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, |
726 | struct lock_class_key *lkclass); | |
9ac4dd8c CH |
727 | struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, |
728 | struct queue_limits *lim, void *queuedata); | |
26a9750a CH |
729 | int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, |
730 | struct request_queue *q); | |
6f8191fd | 731 | void blk_mq_destroy_queue(struct request_queue *); |
320ae51f | 732 | |
24d2f903 | 733 | int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); |
cdb14e0f CH |
734 | int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, |
735 | const struct blk_mq_ops *ops, unsigned int queue_depth, | |
736 | unsigned int set_flags); | |
24d2f903 CH |
737 | void blk_mq_free_tag_set(struct blk_mq_tag_set *set); |
738 | ||
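Putting the allocation helpers together, a hedged probe-time sketch (struct example_dev, struct example_cmd and example_mq_ops are hypothetical; error handling is trimmed):

	static int example_probe(struct example_dev *dev)
	{
		struct blk_mq_tag_set *set = &dev->tag_set;
		struct gendisk *disk;
		int ret;

		memset(set, 0, sizeof(*set));
		set->ops	  = &example_mq_ops;
		set->nr_hw_queues = 1;
		set->queue_depth  = BLKDEV_DEFAULT_RQ;
		set->numa_node	  = NUMA_NO_NODE;
		set->cmd_size	  = sizeof(struct example_cmd);	/* per-request PDU */
		set->flags	  = BLK_MQ_F_SHOULD_MERGE;

		ret = blk_mq_alloc_tag_set(set);
		if (ret)
			return ret;

		disk = blk_mq_alloc_disk(set, NULL, dev);	/* NULL = default limits */
		if (IS_ERR(disk)) {
			blk_mq_free_tag_set(set);
			return PTR_ERR(disk);
		}
		dev->disk = disk;
		return 0;
	}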
320ae51f | 739 | void blk_mq_free_request(struct request *rq); |
f6c80cff KB |
740 | int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, |
741 | unsigned int poll_flags); | |
6f3b0e8b | 742 | |
3c94d83c | 743 | bool blk_mq_queue_inflight(struct request_queue *q); |
ae879912 | 744 | |
6f3b0e8b | 745 | enum { |
9a95e4ef BVA |
746 | /* return when out of requests */ |
747 | BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), | |
748 | /* allocate from reserved pool */ | |
749 | BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), | |
0854bcdc BVA |
750 | /* set RQF_PM */ |
751 | BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), | |
6f3b0e8b CH |
752 | }; |
753 | ||
16458cf3 | 754 | struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, |
9a95e4ef | 755 | blk_mq_req_flags_t flags); |
cd6ce148 | 756 | struct request *blk_mq_alloc_request_hctx(struct request_queue *q, |
16458cf3 | 757 | blk_opf_t opf, blk_mq_req_flags_t flags, |
9a95e4ef | 758 | unsigned int hctx_idx); |
e028f167 JA |
759 | |
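A hedged sketch of allocating and synchronously executing a driver-internal (passthrough) request with these helpers; the PDU contents are driver specific and only hinted at, and blk_execute_rq() is declared further down in this header:

	static blk_status_t example_send_internal_cmd(struct request_queue *q)
	{
		struct request *rq;
		blk_status_t status;

		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(rq))
			return BLK_STS_RESOURCE;

		/* fill the driver PDU via blk_mq_rq_to_pdu(rq) here */

		status = blk_execute_rq(rq, false);
		blk_mq_free_request(rq);
		return status;
	}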
760 | /* | |
761 | * Tag address space map. | |
762 | */ | |
763 | struct blk_mq_tags { | |
764 | unsigned int nr_tags; | |
765 | unsigned int nr_reserved_tags; | |
4f1731df | 766 | unsigned int active_queues; |
e028f167 JA |
767 | |
768 | struct sbitmap_queue bitmap_tags; | |
769 | struct sbitmap_queue breserved_tags; | |
770 | ||
771 | struct request **rqs; | |
772 | struct request **static_rqs; | |
773 | struct list_head page_list; | |
774 | ||
775 | /* | |
776 | * used to clear request reference in rqs[] before freeing one | |
777 | * request pool | |
778 | */ | |
779 | spinlock_t lock; | |
780 | }; | |
781 | ||
782 | static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, | |
783 | unsigned int tag) | |
784 | { | |
785 | if (tag < tags->nr_tags) { | |
786 | prefetch(tags->rqs[tag]); | |
787 | return tags->rqs[tag]; | |
788 | } | |
789 | ||
790 | return NULL; | |
791 | } | |
320ae51f | 792 | |
205fb5f5 BVA |
793 | enum { |
794 | BLK_MQ_UNIQUE_TAG_BITS = 16, | |
795 | BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, | |
796 | }; | |
797 | ||
798 | u32 blk_mq_unique_tag(struct request *rq); | |
799 | ||
800 | static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) | |
801 | { | |
802 | return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; | |
803 | } | |
804 | ||
805 | static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | |
806 | { | |
807 | return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; | |
808 | } | |
809 | ||
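These helpers matter for drivers that funnel completions from several hardware queues through one completion path: the driver stores blk_mq_unique_tag() in its command and splits it back up on completion. A hedged sketch using blk_mq_tag_to_rq() from above:

	static struct request *example_lookup_rq(struct blk_mq_tag_set *set,
						 u32 unique_tag)
	{
		u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
		u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

		return blk_mq_tag_to_rq(set->tags[hwq], tag);
	}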
27a46989 PB |
810 | /** |
811 | * blk_mq_rq_state() - read the current MQ_RQ_* state of a request | |
812 | * @rq: target request. | |
813 | */ | |
814 | static inline enum mq_rq_state blk_mq_rq_state(struct request *rq) | |
815 | { | |
816 | return READ_ONCE(rq->state); | |
817 | } | |
818 | ||
819 | static inline int blk_mq_request_started(struct request *rq) | |
820 | { | |
821 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; | |
822 | } | |
823 | ||
824 | static inline int blk_mq_request_completed(struct request *rq) | |
825 | { | |
826 | return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; | |
827 | } | |
320ae51f | 828 | |
83fba8c8 CL |
829 | /* |
830 | * | |
831 | * Set the state to complete when completing a request from inside ->queue_rq. | |
832 | * This is used by drivers that want to ensure special complete actions that | |
833 | * need access to the request are called on failure, e.g. by nvme for | |
834 | * multipathing. | |
835 | */ | |
836 | static inline void blk_mq_set_request_complete(struct request *rq) | |
837 | { | |
838 | WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); | |
839 | } | |
840 | ||
e8dc17e2 SAS |
841 | /* |
842 | * Complete the request directly instead of deferring it to softirq or | |
843 | * completing it on another CPU. Useful in preemptible rather than interrupt context. |
844 | */ | |
845 | static inline void blk_mq_complete_request_direct(struct request *rq, | |
846 | void (*complete)(struct request *rq)) | |
847 | { | |
848 | WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); | |
849 | complete(rq); | |
850 | } | |
851 | ||
e2490073 | 852 | void blk_mq_start_request(struct request *rq); |
2a842aca CH |
853 | void blk_mq_end_request(struct request *rq, blk_status_t error); |
854 | void __blk_mq_end_request(struct request *rq, blk_status_t error); | |
f794f335 JA |
855 | void blk_mq_end_request_batch(struct io_comp_batch *ib); |
856 | ||
857 | /* | |
858 | * Start/end time stamping is only needed if iostat or blk stats are |
859 | * enabled, or an IO scheduler is in use. |
860 | */ | |
861 | static inline bool blk_mq_need_time_stamp(struct request *rq) | |
862 | { | |
8e6e83d7 KK |
863 | /* |
864 | * passthrough io doesn't use iostat accounting, cgroup stats | |
865 | * and io scheduler functionalities. | |
866 | */ | |
867 | if (blk_rq_is_passthrough(rq)) | |
868 | return false; | |
dd6216bb | 869 | return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); |
f794f335 JA |
870 | } |
871 | ||
99e48cd6 JG |
872 | static inline bool blk_mq_is_reserved_rq(struct request *rq) |
873 | { | |
874 | return rq->rq_flags & RQF_RESV; | |
875 | } | |
876 | ||
f794f335 JA |
877 | /* |
878 | * Batched completions only work when there is no I/O error and no special | |
879 | * ->end_io handler. | |
880 | */ | |
881 | static inline bool blk_mq_add_to_batch(struct request *req, | |
882 | struct io_comp_batch *iob, int ioerror, | |
883 | void (*complete)(struct io_comp_batch *)) | |
884 | { | |
c6b7a3a2 ML |
885 | /* |
886 | * blk_mq_end_request_batch() can't end requests allocated from |
887 | * sched tags. |
888 | */ | |
889 | if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror || | |
2d87d455 | 890 | (req->end_io && !blk_rq_is_passthrough(req))) |
f794f335 | 891 | return false; |
ab3e1d3b | 892 | |
f794f335 JA |
893 | if (!iob->complete) |
894 | iob->complete = complete; | |
895 | else if (iob->complete != complete) | |
896 | return false; | |
897 | iob->need_ts |= blk_mq_need_time_stamp(req); | |
898 | rq_list_add(&iob->req_list, req); | |
899 | return true; | |
900 | } | |
320ae51f | 901 | |
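A hedged sketch of batched completions in a polled path: the callback passed to blk_mq_add_to_batch() is invoked later with the accumulated batch and is expected to end the requests, typically via blk_mq_end_request_batch(); example_unmap() is hypothetical:

	static void example_complete_batch(struct io_comp_batch *iob)
	{
		struct request *req;

		rq_list_for_each(&iob->req_list, req)
			example_unmap(req);		/* per-request teardown */
		blk_mq_end_request_batch(iob);
	}

	/* For each request found completed in the driver's ->poll() handler: */
	static void example_poll_done(struct request *req,
				      struct io_comp_batch *iob, int error)
	{
		if (!blk_mq_add_to_batch(req, iob, error, example_complete_batch))
			blk_mq_complete_request(req);	/* fall back to one-by-one */
	}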
2b053aca | 902 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); |
6fca6a61 | 903 | void blk_mq_kick_requeue_list(struct request_queue *q); |
2849450a | 904 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
15f73f5b | 905 | void blk_mq_complete_request(struct request *rq); |
40d09b53 | 906 | bool blk_mq_complete_request_remote(struct request *rq); |
320ae51f JA |
907 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
908 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | |
280d45f6 | 909 | void blk_mq_stop_hw_queues(struct request_queue *q); |
2f268556 | 910 | void blk_mq_start_hw_queues(struct request_queue *q); |
ae911c5e | 911 | void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
1b4a3258 | 912 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
97e01209 | 913 | void blk_mq_quiesce_queue(struct request_queue *q); |
483239c7 | 914 | void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set); |
414dd48e CL |
915 | void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set); |
916 | void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set); | |
e4e73913 | 917 | void blk_mq_unquiesce_queue(struct request_queue *q); |
7587a5ae | 918 | void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
626fb735 | 919 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
b94ec296 | 920 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
b9151e7b | 921 | void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs); |
e0489487 SG |
922 | void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, |
923 | busy_tag_iter_fn *fn, void *priv); | |
f9934a80 | 924 | void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); |
c761d96b | 925 | void blk_mq_freeze_queue(struct request_queue *q); |
b4c6a028 | 926 | void blk_mq_unfreeze_queue(struct request_queue *q); |
1671d522 | 927 | void blk_freeze_queue_start(struct request_queue *q); |
6bae363e | 928 | void blk_mq_freeze_queue_wait(struct request_queue *q); |
f91328c4 KB |
929 | int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, |
930 | unsigned long timeout); | |
320ae51f | 931 | |
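A short, hedged sketch of the freeze/unfreeze pair, the usual way to apply a driver-side change that must not race with in-flight requests:

	static void example_update_settings(struct request_queue *q)
	{
		blk_mq_freeze_queue(q);
		/* no new requests enter and in-flight ones have drained here */
		/* ... apply the new settings ... */
		blk_mq_unfreeze_queue(q);
	}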
a4e1d0b7 | 932 | void blk_mq_map_queues(struct blk_mq_queue_map *qmap); |
868f2f0b KB |
933 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); |
934 | ||
852ec809 | 935 | void blk_mq_quiesce_queue_nowait(struct request_queue *q); |
4f084b41 | 936 | |
9cf2bab6 JA |
937 | unsigned int blk_mq_rq_cpu(struct request *rq); |
938 | ||
15f73f5b CH |
939 | bool __blk_should_fake_timeout(struct request_queue *q); |
940 | static inline bool blk_should_fake_timeout(struct request_queue *q) | |
941 | { | |
942 | if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && | |
943 | test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) | |
944 | return __blk_should_fake_timeout(q); | |
945 | return false; | |
946 | } | |
947 | ||
d386732b AA |
948 | /** |
949 | * blk_mq_rq_from_pdu - cast a PDU to a request | |
950 | * @pdu: the PDU (Protocol Data Unit) to be cast |
951 | * | |
952 | * Return: request | |
953 | * | |
320ae51f | 954 | * Driver command data is immediately after the request. So subtract request |
d386732b | 955 | * size to get back to the original request. |
320ae51f JA |
956 | */ |
957 | static inline struct request *blk_mq_rq_from_pdu(void *pdu) | |
958 | { | |
959 | return pdu - sizeof(struct request); | |
960 | } | |
d386732b AA |
961 | |
962 | /** | |
963 | * blk_mq_rq_to_pdu - cast a request to a PDU | |
964 | * @rq: the request to be cast |
965 | * | |
966 | * Return: pointer to the PDU | |
967 | * | |
968 | * Driver command data is immediately after the request. So add request to get | |
969 | * the PDU. | |
970 | */ | |
320ae51f JA |
971 | static inline void *blk_mq_rq_to_pdu(struct request *rq) |
972 | { | |
2963e3f7 | 973 | return rq + 1; |
320ae51f JA |
974 | } |
975 | ||
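A hedged sketch of the PDU convention these two helpers implement: with blk_mq_tag_set.cmd_size set to sizeof(struct example_cmd), the per-command data sits directly behind the request and the helpers convert between the two views. struct example_cmd and the handlers are hypothetical:

	struct example_cmd {
		u32 hw_slot;
		blk_status_t status;
	};

	static void example_prepare(struct request *rq)
	{
		struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

		cmd->hw_slot = rq->tag;
	}

	static void example_hw_irq_done(struct example_cmd *cmd)
	{
		struct request *rq = blk_mq_rq_from_pdu(cmd);

		blk_mq_end_request(rq, cmd->status);
	}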
320ae51f | 976 | #define queue_for_each_hw_ctx(q, hctx, i) \ |
4e5cc99e | 977 | xa_for_each(&(q)->hctx_table, (i), (hctx)) |
320ae51f | 978 | |
320ae51f | 979 | #define hctx_for_each_ctx(hctx, ctx, i) \ |
0d0b7d42 JA |
980 | for ((i) = 0; (i) < (hctx)->nr_ctx && \ |
981 | ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) | |
320ae51f | 982 | |
226b4fc7 ML |
983 | static inline void blk_mq_cleanup_rq(struct request *rq) |
984 | { | |
985 | if (rq->q->mq_ops->cleanup_rq) | |
986 | rq->q->mq_ops->cleanup_rq(rq); | |
987 | } | |
988 | ||
53ffabfd CK |
989 | static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, |
990 | unsigned int nr_segs) | |
991 | { | |
992 | rq->nr_phys_segments = nr_segs; | |
993 | rq->__data_len = bio->bi_iter.bi_size; | |
994 | rq->bio = rq->biotail = bio; | |
995 | rq->ioprio = bio_prio(bio); | |
53ffabfd CK |
996 | } |
997 | ||
fb01a293 ML |
998 | void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, |
999 | struct lock_class_key *key); | |
8cf7961d | 1000 | |
24b83deb CH |
1001 | static inline bool rq_is_sync(struct request *rq) |
1002 | { | |
1003 | return op_is_sync(rq->cmd_flags); | |
1004 | } | |
1005 | ||
1006 | void blk_rq_init(struct request_queue *q, struct request *rq); | |
24b83deb CH |
1007 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
1008 | struct bio_set *bs, gfp_t gfp_mask, | |
1009 | int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); | |
1010 | void blk_rq_unprep_clone(struct request *rq); | |
28db4711 | 1011 | blk_status_t blk_insert_cloned_request(struct request *rq); |
24b83deb CH |
1012 | |
1013 | struct rq_map_data { | |
1014 | struct page **pages; | |
24b83deb | 1015 | unsigned long offset; |
f5d632d1 JA |
1016 | unsigned short page_order; |
1017 | unsigned short nr_entries; | |
1018 | bool null_mapped; | |
1019 | bool from_user; | |
24b83deb CH |
1020 | }; |
1021 | ||
1022 | int blk_rq_map_user(struct request_queue *, struct request *, | |
1023 | struct rq_map_data *, void __user *, unsigned long, gfp_t); | |
55765402 AG |
1024 | int blk_rq_map_user_io(struct request *, struct rq_map_data *, |
1025 | void __user *, unsigned long, gfp_t, bool, int, bool, int); | |
24b83deb CH |
1026 | int blk_rq_map_user_iov(struct request_queue *, struct request *, |
1027 | struct rq_map_data *, const struct iov_iter *, gfp_t); | |
1028 | int blk_rq_unmap_user(struct bio *); | |
1029 | int blk_rq_map_kern(struct request_queue *, struct request *, void *, | |
1030 | unsigned int, gfp_t); | |
1031 | int blk_rq_append_bio(struct request *rq, struct bio *bio); | |
e2e53086 | 1032 | void blk_execute_rq_nowait(struct request *rq, bool at_head); |
b84ba30b | 1033 | blk_status_t blk_execute_rq(struct request *rq, bool at_head); |
c6e99ea4 | 1034 | bool blk_rq_is_poll(struct request *rq); |
24b83deb CH |
1035 | |
1036 | struct req_iterator { | |
1037 | struct bvec_iter iter; | |
1038 | struct bio *bio; | |
1039 | }; | |
1040 | ||
1041 | #define __rq_for_each_bio(_bio, rq) \ | |
1042 | if ((rq->bio)) \ | |
1043 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) | |
1044 | ||
1045 | #define rq_for_each_segment(bvl, _rq, _iter) \ | |
1046 | __rq_for_each_bio(_iter.bio, _rq) \ | |
1047 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) | |
1048 | ||
1049 | #define rq_for_each_bvec(bvl, _rq, _iter) \ | |
1050 | __rq_for_each_bio(_iter.bio, _rq) \ | |
1051 | bio_for_each_bvec(bvl, _iter.bio, _iter.iter) | |
1052 | ||
1053 | #define rq_iter_last(bvec, _iter) \ | |
1054 | (_iter.bio->bi_next == NULL && \ | |
1055 | bio_iter_last(bvec, _iter.iter)) | |
1056 | ||
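A hedged sketch of walking a request's data with these iterators, for example in a memory-backed device; example_copy_to_dev() is hypothetical and blk_rq_pos() is defined just below:

	static void example_transfer(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec bvec;
		sector_t pos = blk_rq_pos(rq);

		rq_for_each_segment(bvec, rq, iter) {
			example_copy_to_dev(pos, &bvec);
			pos += bvec.bv_len >> SECTOR_SHIFT;
		}
	}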
1057 | /* | |
1058 | * blk_rq_pos() : the current sector | |
1059 | * blk_rq_bytes() : bytes left in the entire request | |
1060 | * blk_rq_cur_bytes() : bytes left in the current segment | |
24b83deb CH |
1061 | * blk_rq_sectors() : sectors left in the entire request |
1062 | * blk_rq_cur_sectors() : sectors left in the current segment | |
1063 | * blk_rq_stats_sectors() : sectors of the entire request used for stats | |
1064 | */ | |
1065 | static inline sector_t blk_rq_pos(const struct request *rq) | |
1066 | { | |
1067 | return rq->__sector; | |
1068 | } | |
1069 | ||
1070 | static inline unsigned int blk_rq_bytes(const struct request *rq) | |
1071 | { | |
1072 | return rq->__data_len; | |
1073 | } | |
1074 | ||
1075 | static inline int blk_rq_cur_bytes(const struct request *rq) | |
1076 | { | |
b6559d8f CH |
1077 | if (!rq->bio) |
1078 | return 0; | |
1079 | if (!bio_has_data(rq->bio)) /* dataless requests such as discard */ | |
1080 | return rq->bio->bi_iter.bi_size; | |
1081 | return bio_iovec(rq->bio).bv_len; | |
24b83deb CH |
1082 | } |
1083 | ||
24b83deb CH |
1084 | static inline unsigned int blk_rq_sectors(const struct request *rq) |
1085 | { | |
1086 | return blk_rq_bytes(rq) >> SECTOR_SHIFT; | |
1087 | } | |
1088 | ||
1089 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |
1090 | { | |
1091 | return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; | |
1092 | } | |
1093 | ||
1094 | static inline unsigned int blk_rq_stats_sectors(const struct request *rq) | |
1095 | { | |
1096 | return rq->stats_sectors; | |
1097 | } | |
1098 | ||
1099 | /* | |
1100 | * Some commands like WRITE SAME have a payload or data transfer size which | |
1101 | * is different from the size of the request. Any driver that supports such | |
1102 | * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to | |
1103 | * calculate the data transfer size. | |
1104 | */ | |
1105 | static inline unsigned int blk_rq_payload_bytes(struct request *rq) | |
1106 | { | |
1107 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1108 | return rq->special_vec.bv_len; | |
1109 | return blk_rq_bytes(rq); | |
1110 | } | |
1111 | ||
1112 | /* | |
1113 | * Return the first full biovec in the request. The caller needs to check that | |
1114 | * there are any bvecs before calling this helper. | |
1115 | */ | |
1116 | static inline struct bio_vec req_bvec(struct request *rq) | |
1117 | { | |
1118 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1119 | return rq->special_vec; | |
1120 | return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); | |
1121 | } | |
1122 | ||
1123 | static inline unsigned int blk_rq_count_bios(struct request *rq) | |
1124 | { | |
1125 | unsigned int nr_bios = 0; | |
1126 | struct bio *bio; | |
1127 | ||
1128 | __rq_for_each_bio(bio, rq) | |
1129 | nr_bios++; | |
1130 | ||
1131 | return nr_bios; | |
1132 | } | |
1133 | ||
1134 | void blk_steal_bios(struct bio_list *list, struct request *rq); | |
1135 | ||
1136 | /* | |
1137 | * Request completion related functions. | |
1138 | * | |
1139 | * blk_update_request() completes given number of bytes and updates | |
1140 | * the request without completing it. | |
1141 | */ | |
1142 | bool blk_update_request(struct request *rq, blk_status_t error, | |
1143 | unsigned int nr_bytes); | |
1144 | void blk_abort_request(struct request *); | |
1145 | ||
1146 | /* | |
1147 | * Number of physical segments as sent to the device. | |
1148 | * | |
1149 | * Normally this is the number of discontiguous data segments sent by the | |
1150 | * submitter. But for data-less commands like discard we might have no |
1151 | * actual data segments submitted, but the driver might have to add its |
1152 | * own special payload. In that case we still return 1 here so that this | |
1153 | * special payload will be mapped. | |
1154 | */ | |
1155 | static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) | |
1156 | { | |
1157 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1158 | return 1; | |
1159 | return rq->nr_phys_segments; | |
1160 | } | |
1161 | ||
1162 | /* | |
1163 | * Number of discard segments (or ranges) the driver needs to fill in. | |
1164 | * Each discard bio merged into a request is counted as one segment. | |
1165 | */ | |
1166 | static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) | |
1167 | { | |
1168 | return max_t(unsigned short, rq->nr_phys_segments, 1); | |
1169 | } | |
1170 | ||
1171 | int __blk_rq_map_sg(struct request_queue *q, struct request *rq, | |
1172 | struct scatterlist *sglist, struct scatterlist **last_sg); | |
1173 | static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |
1174 | struct scatterlist *sglist) | |
1175 | { | |
1176 | struct scatterlist *last_sg = NULL; | |
1177 | ||
1178 | return __blk_rq_map_sg(q, rq, sglist, &last_sg); | |
1179 | } | |
1180 | void blk_dump_rq_flags(struct request *, char *); | |
1181 | ||
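Finally, a hedged sketch of mapping a request for DMA with blk_rq_map_sg(); it assumes the scatterlist was sized for the queue's segment limit and that <linux/dma-mapping.h> is available, with error handling trimmed:

	static int example_map_for_dma(struct device *dma_dev, struct request *rq,
				       struct scatterlist *sgl)
	{
		int nents;

		sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
		nents = blk_rq_map_sg(rq->q, rq, sgl);
		if (!nents)
			return -EIO;

		return dma_map_sg(dma_dev, sgl, nents, rq_dma_dir(rq));
	}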
24b83deb | 1182 | #endif /* BLK_MQ_H */ |