Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
320ae51f JA |
2 | #ifndef BLK_MQ_H |
3 | #define BLK_MQ_H | |
4 | ||
5 | #include <linux/blkdev.h> | |
88459642 | 6 | #include <linux/sbitmap.h> |
fb01a293 | 7 | #include <linux/lockdep.h> |
24b83deb | 8 | #include <linux/scatterlist.h> |
e028f167 | 9 | #include <linux/prefetch.h> |
320ae51f JA |
10 | |
11 | struct blk_mq_tags; | |
f70ced09 | 12 | struct blk_flush_queue; |
320ae51f | 13 | |
24b83deb | 14 | #define BLKDEV_MIN_RQ 4 |
d2a27964 | 15 | #define BLKDEV_DEFAULT_RQ 128 |
24b83deb | 16 | |
de671d61 JA |
17 | enum rq_end_io_ret { |
18 | RQ_END_IO_NONE, | |
19 | RQ_END_IO_FREE, | |
20 | }; | |
21 | ||
22 | typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); | |
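A minimal sketch (not part of the header) of how a driver-supplied rq_end_io_fn might use these return values. The my_drv_end_io name and the use of end_io_data as a completion are assumptions for illustration only.

```c
#include <linux/blk-mq.h>
#include <linux/completion.h>

/* Hypothetical completion hook for a request issued with
 * blk_execute_rq_nowait(); end_io_data was set by the submitter. */
static enum rq_end_io_ret my_drv_end_io(struct request *rq, blk_status_t error)
{
	struct completion *done = rq->end_io_data;

	if (error)
		pr_debug("request failed: %d\n", blk_status_to_errno(error));

	complete(done);			/* wake up the waiter */
	return RQ_END_IO_FREE;		/* let the block layer free the request */
}
```

Returning RQ_END_IO_NONE instead would keep the request alive so the submitter can inspect it and free it later with blk_mq_free_request().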
24b83deb CH |
23 | |
24 | /* | |
25 | * request flags */ | |
26 | typedef __u32 __bitwise req_flags_t; | |
27 | ||
28 | /* drive already may have started this one */ | |
29 | #define RQF_STARTED ((__force req_flags_t)(1 << 1)) | |
30 | /* may not be passed by ioscheduler */ | |
31 | #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) | |
32 | /* request for flush sequence */ | |
33 | #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) | |
34 | /* merge of different types, fail separately */ | |
35 | #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) | |
36 | /* track inflight for MQ */ | |
37 | #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) | |
38 | /* don't call prep for this one */ | |
39 | #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) | |
40 | /* vaguely specified driver internal error. Ignored by the block layer */ | |
41 | #define RQF_FAILED ((__force req_flags_t)(1 << 10)) | |
42 | /* don't warn about errors */ | |
43 | #define RQF_QUIET ((__force req_flags_t)(1 << 11)) | |
44 | /* elevator private data attached */ | |
45 | #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) | |
46 | /* account into disk and partition IO statistics */ | |
47 | #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) | |
48 | /* runtime pm request */ | |
49 | #define RQF_PM ((__force req_flags_t)(1 << 15)) | |
50 | /* on IO scheduler merge hash */ | |
51 | #define RQF_HASHED ((__force req_flags_t)(1 << 16)) | |
52 | /* track IO completion time */ | |
53 | #define RQF_STATS ((__force req_flags_t)(1 << 17)) | |
54 | /* Look at ->special_vec for the actual data payload instead of the | |
55 | bio chain. */ | |
56 | #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) | |
57 | /* The per-zone write lock is held for this request */ | |
58 | #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) | |
59 | /* already slept for hybrid poll */ | |
60 | #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) | |
61 | /* ->timeout has been called, don't expire again */ | |
62 | #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) | |
2ff0682d JA |
63 | /* queue has elevator attached */ |
64 | #define RQF_ELV ((__force req_flags_t)(1 << 22)) | |
99e48cd6 | 65 | #define RQF_RESV ((__force req_flags_t)(1 << 23)) |
24b83deb CH |
66 | |
67 | /* flags that prevent us from merging requests: */ | |
68 | #define RQF_NOMERGE_FLAGS \ | |
69 | (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) | |
70 | ||
71 | enum mq_rq_state { | |
72 | MQ_RQ_IDLE = 0, | |
73 | MQ_RQ_IN_FLIGHT = 1, | |
74 | MQ_RQ_COMPLETE = 2, | |
75 | }; | |
76 | ||
77 | /* | |
78 | * Try to put the fields that are referenced together in the same cacheline. | |
79 | * | |
80 | * If you modify this structure, make sure to update blk_rq_init() and | |
81 | * especially blk_mq_rq_ctx_init() to take care of the added fields. | |
82 | */ | |
83 | struct request { | |
84 | struct request_queue *q; | |
85 | struct blk_mq_ctx *mq_ctx; | |
86 | struct blk_mq_hw_ctx *mq_hctx; | |
87 | ||
16458cf3 | 88 | blk_opf_t cmd_flags; /* op and common flags */ |
24b83deb CH |
89 | req_flags_t rq_flags; |
90 | ||
91 | int tag; | |
92 | int internal_tag; | |
93 | ||
b6087629 JA |
94 | unsigned int timeout; |
95 | ||
24b83deb CH |
96 | /* the following two fields are internal, NEVER access directly */ |
97 | unsigned int __data_len; /* total data len */ | |
98 | sector_t __sector; /* sector cursor */ | |
99 | ||
100 | struct bio *bio; | |
101 | struct bio *biotail; | |
102 | ||
47c122e3 JA |
103 | union { |
104 | struct list_head queuelist; | |
105 | struct request *rq_next; | |
106 | }; | |
24b83deb | 107 | |
24b83deb CH |
108 | struct block_device *part; |
109 | #ifdef CONFIG_BLK_RQ_ALLOC_TIME | |
110 | /* Time that the first bio started allocating this request. */ | |
111 | u64 alloc_time_ns; | |
112 | #endif | |
113 | /* Time that this request was allocated for this IO. */ | |
114 | u64 start_time_ns; | |
115 | /* Time that I/O was submitted to the device. */ | |
116 | u64 io_start_time_ns; | |
117 | ||
118 | #ifdef CONFIG_BLK_WBT | |
119 | unsigned short wbt_flags; | |
120 | #endif | |
121 | /* | |
122 | * rq sectors used for blk stats. It has the same value | 
123 | * as blk_rq_sectors(rq), except that it is never zeroed | 
124 | * by completion. | 
125 | */ | |
126 | unsigned short stats_sectors; | |
127 | ||
128 | /* | |
129 | * Number of scatter-gather DMA addr+len pairs after | |
130 | * physical address coalescing is performed. | |
131 | */ | |
132 | unsigned short nr_phys_segments; | |
133 | ||
134 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
135 | unsigned short nr_integrity_segments; | |
136 | #endif | |
137 | ||
138 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION | |
139 | struct bio_crypt_ctx *crypt_ctx; | |
cb77cb5a | 140 | struct blk_crypto_keyslot *crypt_keyslot; |
24b83deb CH |
141 | #endif |
142 | ||
143 | unsigned short write_hint; | |
144 | unsigned short ioprio; | |
145 | ||
146 | enum mq_rq_state state; | |
0a467d0f | 147 | atomic_t ref; |
24b83deb | 148 | |
24b83deb CH |
149 | unsigned long deadline; |
150 | ||
b6087629 JA |
151 | /* |
152 | * The hash is used inside the scheduler, and killed once the | |
153 | * request reaches the dispatch list. The ipi_list is only used | |
154 | * to queue the request for softirq completion, which is long | |
155 | * after the request has been unhashed (and even removed from | |
156 | * the dispatch list). | |
157 | */ | |
158 | union { | |
159 | struct hlist_node hash; /* merge hash */ | |
160 | struct llist_node ipi_list; | |
161 | }; | |
162 | ||
163 | /* | |
164 | * The rb_node is only used inside the io scheduler, requests | |
165 | * are pruned when moved to the dispatch queue. So let the | |
166 | * completion_data share space with the rb_node. | |
167 | */ | |
168 | union { | |
169 | struct rb_node rb_node; /* sort/lookup */ | |
170 | struct bio_vec special_vec; | |
171 | void *completion_data; | |
b6087629 JA |
172 | }; |
173 | ||
174 | ||
175 | /* | |
176 | * Three pointers are available for the IO schedulers, if they need | |
177 | * more they have to dynamically allocate it. Flush requests are | |
178 | * never put on the IO scheduler. So let the flush fields share | |
179 | * space with the elevator data. | |
180 | */ | |
181 | union { | |
182 | struct { | |
183 | struct io_cq *icq; | |
184 | void *priv[2]; | |
185 | } elv; | |
186 | ||
187 | struct { | |
188 | unsigned int seq; | |
189 | struct list_head list; | |
190 | rq_end_io_fn *saved_end_io; | |
191 | } flush; | |
192 | }; | |
193 | ||
24b83deb CH |
194 | union { |
195 | struct __call_single_data csd; | |
196 | u64 fifo_time; | |
197 | }; | |
198 | ||
199 | /* | |
200 | * completion callback. | |
201 | */ | |
202 | rq_end_io_fn *end_io; | |
203 | void *end_io_data; | |
204 | }; | |
205 | ||
2d9b02be BVA |
206 | static inline enum req_op req_op(const struct request *req) |
207 | { | |
208 | return req->cmd_flags & REQ_OP_MASK; | |
209 | } | |
24b83deb CH |
210 | |
211 | static inline bool blk_rq_is_passthrough(struct request *rq) | |
212 | { | |
213 | return blk_op_is_passthrough(req_op(rq)); | |
214 | } | |
215 | ||
216 | static inline unsigned short req_get_ioprio(struct request *req) | |
217 | { | |
218 | return req->ioprio; | |
219 | } | |
220 | ||
221 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) | |
222 | ||
223 | #define rq_dma_dir(rq) \ | |
224 | (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) | |
225 | ||
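A short, purely illustrative use (not from the header) of the helpers defined above; the pr_debug() reporting is arbitrary and assumes the usual blk-mq headers are included.

```c
/* Classify a request using req_op(), rq_data_dir() and rq_dma_dir(). */
static void my_drv_log_request(struct request *rq)
{
	if (blk_rq_is_passthrough(rq)) {
		pr_debug("passthrough request, no bio payload\n");
		return;
	}

	pr_debug("%s op %d, %u bytes, dma dir %d, ioprio %u\n",
		 rq_data_dir(rq) == WRITE ? "write-like" : "read-like",
		 req_op(rq), blk_rq_bytes(rq), rq_dma_dir(rq),
		 req_get_ioprio(rq));
}
```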
edce22e1 KB |
226 | #define rq_list_add(listptr, rq) do { \ |
227 | (rq)->rq_next = *(listptr); \ | |
228 | *(listptr) = rq; \ | |
229 | } while (0) | |
230 | ||
231 | #define rq_list_pop(listptr) \ | |
232 | ({ \ | |
233 | struct request *__req = NULL; \ | |
234 | if ((listptr) && *(listptr)) { \ | |
235 | __req = *(listptr); \ | |
236 | *(listptr) = __req->rq_next; \ | |
237 | } \ | |
238 | __req; \ | |
239 | }) | |
240 | ||
241 | #define rq_list_peek(listptr) \ | |
242 | ({ \ | |
243 | struct request *__req = NULL; \ | |
244 | if ((listptr) && *(listptr)) \ | |
245 | __req = *(listptr); \ | |
246 | __req; \ | |
247 | }) | |
248 | ||
249 | #define rq_list_for_each(listptr, pos) \ | |
250 | for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) | |
251 | ||
3764fd05 KB |
252 | #define rq_list_for_each_safe(listptr, pos, nxt) \ |
253 | for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \ | |
254 | pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL) | |
255 | ||
edce22e1 KB |
256 | #define rq_list_next(rq) (rq)->rq_next |
257 | #define rq_list_empty(list) ((list) == (struct request *) NULL) | |
258 | ||
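A hypothetical sketch (not part of the header) of how the rq_list helpers above chain requests through rq_next; struct my_drv and my_drv_fetch_completed() are assumed driver-side names.

```c
static void my_drv_drain_completions(struct my_drv *drv)
{
	struct request *rqlist = NULL;	/* an rq_list is just a request pointer */
	struct request *rq;

	/* Build the list; rq_list_add() pushes at the head (LIFO order). */
	while ((rq = my_drv_fetch_completed(drv)))
		rq_list_add(&rqlist, rq);

	/* Non-destructive walk over the collected requests. */
	rq_list_for_each(&rqlist, rq)
		pr_debug("tag %d done\n", rq->tag);

	/* Pop and finish each request until the list is empty. */
	while (!rq_list_empty(rqlist)) {
		rq = rq_list_pop(&rqlist);
		blk_mq_end_request(rq, BLK_STS_OK);
	}
}
```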
d2528be7 KB |
259 | /** |
260 | * rq_list_move() - move a struct request from one list to another | |
261 | * @src: The source list @rq is currently in | |
262 | * @dst: The destination list that @rq will be appended to | |
263 | * @rq: The request to move | |
264 | * @prev: The request preceding @rq in @src (NULL if @rq is the head) | |
265 | */ | |
292c33c9 | 266 | static inline void rq_list_move(struct request **src, struct request **dst, |
d2528be7 KB |
267 | struct request *rq, struct request *prev) |
268 | { | |
269 | if (prev) | |
270 | prev->rq_next = rq->rq_next; | |
271 | else | |
272 | *src = rq->rq_next; | |
273 | rq_list_add(dst, rq); | |
274 | } | |
275 | ||
b2bed51a BVA |
276 | /** |
277 | * enum blk_eh_timer_return - How the timeout handler should proceed | |
278 | * @BLK_EH_DONE: The block driver completed the command or will complete it at | |
279 | * a later time. | |
280 | * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the | |
281 | * request to complete. | |
282 | */ | |
24b83deb | 283 | enum blk_eh_timer_return { |
b2bed51a BVA |
284 | BLK_EH_DONE, |
285 | BLK_EH_RESET_TIMER, | |
24b83deb CH |
286 | }; |
287 | ||
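A sketch, not taken from any particular driver, of how a ->timeout() handler might choose between these two return values; struct my_drv_cmd, its status field, and the hardware check are assumptions.

```c
static enum blk_eh_timer_return my_drv_timeout(struct request *rq)
{
	struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* per-request PDU */

	/* Device still working on it: re-arm the timer and keep waiting. */
	if (my_drv_cmd_in_flight(cmd))
		return BLK_EH_RESET_TIMER;

	/* Otherwise complete the request ourselves through the normal path. */
	cmd->status = BLK_STS_TIMEOUT;
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}
```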
288 | #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ | |
289 | #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ | |
290 | ||
fe644072 | 291 | /** |
d386732b AA |
292 | * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware |
293 | * block device | |
fe644072 | 294 | */ |
320ae51f JA |
295 | struct blk_mq_hw_ctx { |
296 | struct { | |
d386732b | 297 | /** @lock: Protects the dispatch list. */ |
320ae51f | 298 | spinlock_t lock; |
d386732b AA |
299 | /** |
300 | * @dispatch: Used for requests that are ready to be | |
301 | * dispatched to the hardware but for some reason (e.g. lack of | |
302 | * resources) could not be sent to the hardware. As soon as the | |
303 | * driver can send new requests, requests on this list are | 
304 | * sent first, for fairer dispatch. | 
305 | */ | |
320ae51f | 306 | struct list_head dispatch; |
d386732b AA |
307 | /** |
308 | * @state: BLK_MQ_S_* flags. Defines the state of the hw | |
309 | * queue (active, scheduled to restart, stopped). | |
310 | */ | |
311 | unsigned long state; | |
320ae51f JA |
312 | } ____cacheline_aligned_in_smp; |
313 | ||
d386732b AA |
314 | /** |
315 | * @run_work: Used for scheduling a hardware queue run at a later time. | |
316 | */ | |
9f993737 | 317 | struct delayed_work run_work; |
d386732b | 318 | /** @cpumask: Map of available CPUs where this hctx can run. */ |
e4043dcf | 319 | cpumask_var_t cpumask; |
d386732b AA |
320 | /** |
321 | * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU | |
322 | * selection from @cpumask. | |
323 | */ | |
506e931f | 324 | int next_cpu; |
d386732b AA |
325 | /** |
326 | * @next_cpu_batch: Counter of how many work items are left in the batch | 
327 | * before changing to the next CPU. | 
328 | */ | |
506e931f | 329 | int next_cpu_batch; |
320ae51f | 330 | |
d386732b AA |
331 | /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */ |
332 | unsigned long flags; | |
320ae51f | 333 | |
d386732b AA |
334 | /** |
335 | * @sched_data: Pointer owned by the IO scheduler attached to a request | |
336 | * queue. It's up to the IO scheduler how to use this pointer. | |
337 | */ | |
bd166ef1 | 338 | void *sched_data; |
d386732b AA |
339 | /** |
340 | * @queue: Pointer to the request queue that owns this hardware context. | |
341 | */ | |
320ae51f | 342 | struct request_queue *queue; |
d386732b | 343 | /** @fq: Queue of requests that need to perform a flush operation. */ |
f70ced09 | 344 | struct blk_flush_queue *fq; |
320ae51f | 345 | |
d386732b AA |
346 | /** |
347 | * @driver_data: Pointer to data owned by the block driver that created | |
348 | * this hctx | |
349 | */ | |
320ae51f JA |
350 | void *driver_data; |
351 | ||
d386732b AA |
352 | /** |
353 | * @ctx_map: Bitmap for each software queue. If bit is on, there is a | |
354 | * pending request in that software queue. | |
355 | */ | |
88459642 | 356 | struct sbitmap ctx_map; |
1429d7c9 | 357 | |
d386732b AA |
358 | /** |
359 | * @dispatch_from: Software queue to be used when no scheduler was | |
360 | * selected. | |
361 | */ | |
b347689f | 362 | struct blk_mq_ctx *dispatch_from; |
d386732b AA |
363 | /** |
364 | * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to | |
365 | * decide if the hw_queue is busy using an Exponential Weighted Moving | 
366 | * Average algorithm. | |
367 | */ | |
6e768717 | 368 | unsigned int dispatch_busy; |
b347689f | 369 | |
d386732b | 370 | /** @type: HCTX_TYPE_* flags. Type of hardware queue. */ |
f31967f0 | 371 | unsigned short type; |
d386732b | 372 | /** @nr_ctx: Number of software queues. */ |
f31967f0 | 373 | unsigned short nr_ctx; |
d386732b | 374 | /** @ctxs: Array of software queues. */ |
6e768717 | 375 | struct blk_mq_ctx **ctxs; |
4bb659b1 | 376 | |
d386732b | 377 | /** @dispatch_wait_lock: Lock for dispatch_wait queue. */ |
5815839b | 378 | spinlock_t dispatch_wait_lock; |
d386732b AA |
379 | /** |
380 | * @dispatch_wait: Waitqueue on which to put requests when there is no tag | 
381 | * available at the moment, so they can wait for another try in the future. | 
382 | */ | |
eb619fdb | 383 | wait_queue_entry_t dispatch_wait; |
d386732b AA |
384 | |
385 | /** | |
386 | * @wait_index: Index of next available dispatch_wait queue to insert | |
387 | * requests. | |
388 | */ | |
8537b120 | 389 | atomic_t wait_index; |
320ae51f | 390 | |
d386732b AA |
391 | /** |
392 | * @tags: Tags owned by the block driver. A tag in this set is only | 
393 | * assigned when a request is dispatched from a hardware queue. | |
394 | */ | |
320ae51f | 395 | struct blk_mq_tags *tags; |
d386732b AA |
396 | /** |
397 | * @sched_tags: Tags owned by I/O scheduler. If there is an I/O | |
398 | * scheduler associated with a request queue, a tag is assigned when | |
399 | * that request is allocated. Else, this member is not used. | |
400 | */ | |
bd166ef1 | 401 | struct blk_mq_tags *sched_tags; |
320ae51f | 402 | |
d386732b | 403 | /** @queued: Number of queued requests. */ |
320ae51f | 404 | unsigned long queued; |
d386732b | 405 | /** @run: Number of dispatched requests. */ |
320ae51f | 406 | unsigned long run; |
320ae51f | 407 | |
d386732b | 408 | /** @numa_node: NUMA node the storage adapter has been connected to. */ |
320ae51f | 409 | unsigned int numa_node; |
d386732b | 410 | /** @queue_num: Index of this hardware queue. */ |
17ded320 | 411 | unsigned int queue_num; |
320ae51f | 412 | |
d386732b AA |
413 | /** |
414 | * @nr_active: Number of active requests. Only used when a tag set is | |
415 | * shared across request queues. | |
416 | */ | |
0d2602ca JA |
417 | atomic_t nr_active; |
418 | ||
bf0beec0 ML |
419 | /** @cpuhp_online: List to store requests if a CPU is going to die */ | 
420 | struct hlist_node cpuhp_online; | |
d386732b | 421 | /** @cpuhp_dead: List to store requests if some CPU dies. */ | 
9467f859 | 422 | struct hlist_node cpuhp_dead; |
d386732b | 423 | /** @kobj: Kernel object for sysfs. */ |
320ae51f | 424 | struct kobject kobj; |
05229bee | 425 | |
9c1051aa | 426 | #ifdef CONFIG_BLK_DEBUG_FS |
d386732b AA |
427 | /** |
428 | * @debugfs_dir: debugfs directory for this hardware queue. Named | |
429 | * as hctx<queue_num>. | 
430 | */ | |
9c1051aa | 431 | struct dentry *debugfs_dir; |
d386732b | 432 | /** @sched_debugfs_dir: debugfs directory for the scheduler. */ |
d332ce09 | 433 | struct dentry *sched_debugfs_dir; |
9c1051aa | 434 | #endif |
07319678 | 435 | |
2dd209f0 BVA |
436 | /** |
437 | * @hctx_list: if this hctx is not in use, this is an entry in | |
438 | * q->unused_hctx_list. | |
439 | */ | |
2f8f1336 | 440 | struct list_head hctx_list; |
320ae51f JA |
441 | }; |
442 | ||
7a18312c | 443 | /** |
d386732b | 444 | * struct blk_mq_queue_map - Map software queues to hardware queues |
7a18312c BVA |
445 | * @mq_map: CPU ID to hardware queue index map. This is an array |
446 | * with nr_cpu_ids elements. Each element has a value in the range | |
447 | * [@queue_offset, @queue_offset + @nr_queues). | |
448 | * @nr_queues: Number of hardware queues to map CPU IDs onto. | |
449 | * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe | |
450 | * driver to map each hardware queue type (enum hctx_type) onto a distinct | |
451 | * set of hardware queues. | |
452 | */ | |
ed76e329 JA |
453 | struct blk_mq_queue_map { |
454 | unsigned int *mq_map; | |
455 | unsigned int nr_queues; | |
843477d4 | 456 | unsigned int queue_offset; |
ed76e329 JA |
457 | }; |
458 | ||
d386732b AA |
459 | /** |
460 | * enum hctx_type - Type of hardware queue | |
461 | * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for. | |
462 | * @HCTX_TYPE_READ: Just for READ I/O. | |
463 | * @HCTX_TYPE_POLL: Polled I/O of any kind. | |
464 | * @HCTX_MAX_TYPES: Number of types of hctx. | |
465 | */ | |
e20ba6e1 | 466 | enum hctx_type { |
d386732b AA |
467 | HCTX_TYPE_DEFAULT, |
468 | HCTX_TYPE_READ, | |
469 | HCTX_TYPE_POLL, | |
e20ba6e1 CH |
470 | |
471 | HCTX_MAX_TYPES, | |
ed76e329 JA |
472 | }; |
473 | ||
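To make the interaction between blk_mq_queue_map and enum hctx_type concrete, here is a hedged sketch of a driver ->map_queues() hook in the style of existing drivers. The queue counts are invented constants, and the tag set is assumed to have nr_maps == HCTX_MAX_TYPES.

```c
#define MY_DRV_NR_DEFAULT_QUEUES	4	/* hypothetical */
#define MY_DRV_NR_POLL_QUEUES		2	/* hypothetical */

static void my_drv_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int qoff = 0;
	int i;

	for (i = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		switch (i) {
		case HCTX_TYPE_DEFAULT:
			map->nr_queues = MY_DRV_NR_DEFAULT_QUEUES;
			break;
		case HCTX_TYPE_READ:
			map->nr_queues = 0;	/* share the default queues */
			continue;
		case HCTX_TYPE_POLL:
			map->nr_queues = MY_DRV_NR_POLL_QUEUES;
			break;
		}
		map->queue_offset = qoff;
		qoff += map->nr_queues;
		blk_mq_map_queues(map);	/* default CPU-to-queue spreading */
	}
}
```

A map whose nr_queues is left at zero simply falls back to the HCTX_TYPE_DEFAULT map.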
7a18312c BVA |
474 | /** |
475 | * struct blk_mq_tag_set - tag set that can be shared between request queues | |
476 | * @map: One or more ctx -> hctx mappings. One map exists for each | |
477 | * hardware queue type (enum hctx_type) that the driver wishes | |
478 | * to support. There are no restrictions on maps being of the | |
479 | * same size, and it's perfectly legal to share maps between | |
480 | * types. | |
481 | * @nr_maps: Number of elements in the @map array. A number in the range | |
482 | * [1, HCTX_MAX_TYPES]. | |
483 | * @ops: Pointers to functions that implement block driver behavior. | |
484 | * @nr_hw_queues: Number of hardware queues supported by the block driver that | |
485 | * owns this data structure. | |
486 | * @queue_depth: Number of tags per hardware queue, reserved tags included. | |
487 | * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag | |
488 | * allocations. | |
489 | * @cmd_size: Number of additional bytes to allocate per request. The block | |
490 | * driver owns these additional bytes. | |
491 | * @numa_node: NUMA node the storage adapter has been connected to. | |
492 | * @timeout: Request processing timeout in jiffies. | |
493 | * @flags: Zero or more BLK_MQ_F_* flags. | |
494 | * @driver_data: Pointer to data owned by the block driver that created this | |
495 | * tag set. | |
496 | * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues | |
497 | * elements. | |
079a2e3e JG |
498 | * @shared_tags: |
499 | * Shared set of tags. Has @nr_hw_queues elements. If set, | |
500 | * shared by all @tags. | |
7a18312c BVA |
501 | * @tag_list_lock: Serializes tag_list accesses. |
502 | * @tag_list: List of the request queues that use this tag set. See also | |
503 | * request_queue.tag_set_list. | |
504 | */ | |
24d2f903 | 505 | struct blk_mq_tag_set { |
ed76e329 | 506 | struct blk_mq_queue_map map[HCTX_MAX_TYPES]; |
7a18312c | 507 | unsigned int nr_maps; |
f8a5b122 | 508 | const struct blk_mq_ops *ops; |
7a18312c BVA |
509 | unsigned int nr_hw_queues; |
510 | unsigned int queue_depth; | |
320ae51f | 511 | unsigned int reserved_tags; |
7a18312c | 512 | unsigned int cmd_size; |
320ae51f JA |
513 | int numa_node; |
514 | unsigned int timeout; | |
7a18312c | 515 | unsigned int flags; |
24d2f903 CH |
516 | void *driver_data; |
517 | ||
518 | struct blk_mq_tags **tags; | |
0d2602ca | 519 | |
079a2e3e | 520 | struct blk_mq_tags *shared_tags; |
e155b0c2 | 521 | |
0d2602ca JA |
522 | struct mutex tag_list_lock; |
523 | struct list_head tag_list; | |
320ae51f JA |
524 | }; |
525 | ||
d386732b AA |
526 | /** |
527 | * struct blk_mq_queue_data - Data about a request inserted in a queue | |
528 | * | |
529 | * @rq: Request pointer. | |
530 | * @last: If it is the last request in the queue. | |
531 | */ | |
74c45052 JA |
532 | struct blk_mq_queue_data { |
533 | struct request *rq; | |
74c45052 JA |
534 | bool last; |
535 | }; | |
536 | ||
2dd6532e | 537 | typedef bool (busy_tag_iter_fn)(struct request *, void *); |
05229bee | 538 | |
d386732b AA |
539 | /** |
540 | * struct blk_mq_ops - Callback functions that implement block driver | 
541 | * behaviour. | |
542 | */ | |
320ae51f | 543 | struct blk_mq_ops { |
d386732b AA |
544 | /** |
545 | * @queue_rq: Queue a new request from block IO. | |
320ae51f | 546 | */ |
0516c2f6 DW |
547 | blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, |
548 | const struct blk_mq_queue_data *); | |
320ae51f | 549 | |
d386732b AA |
550 | /** |
551 | * @commit_rqs: If a driver uses bd->last to judge when to submit | |
552 | * requests to hardware, it must define this function. In case of errors | |
553 | * that make us stop issuing further requests, this hook serves the | |
d666ba98 JA |
554 | * purpose of kicking the hardware (which the last request otherwise |
555 | * would have done). | |
556 | */ | |
0516c2f6 | 557 | void (*commit_rqs)(struct blk_mq_hw_ctx *); |
d666ba98 | 558 | |
3c67d44d JA |
559 | /** |
560 | * @queue_rqs: Queue a list of new requests. Driver is guaranteed | |
561 | * that each request belongs to the same queue. If the driver doesn't | |
562 | * empty the @rqlist completely, then the rest will be queued | |
563 | * individually by the block layer upon return. | |
564 | */ | |
565 | void (*queue_rqs)(struct request **rqlist); | |
566 | ||
d386732b AA |
567 | /** |
568 | * @get_budget: Reserve budget before queueing a request; once .queue_rq | 
de148297 ML |
569 | * has run, it is the driver's responsibility to release the | 
570 | * reserved budget. The failure case of .get_budget must also | 
571 | * be handled to avoid I/O deadlock. | 
572 | */ | |
2a5a24aa | 573 | int (*get_budget)(struct request_queue *); |
0516c2f6 | 574 | |
d386732b AA |
575 | /** |
576 | * @put_budget: Release the reserved budget. | |
577 | */ | |
2a5a24aa | 578 | void (*put_budget)(struct request_queue *, int); |
de148297 | 579 | |
85367040 ML |
580 | /** |
581 | * @set_rq_budget_token: store rq's budget token | |
d022d18c ML |
582 | */ |
583 | void (*set_rq_budget_token)(struct request *, int); | |
85367040 ML |
584 | /** |
585 | * @get_rq_budget_token: retrieve rq's budget token | |
d022d18c ML |
586 | */ |
587 | int (*get_rq_budget_token)(struct request *); | |
588 | ||
d386732b AA |
589 | /** |
590 | * @timeout: Called on request timeout. | |
320ae51f | 591 | */ |
9bdb4833 | 592 | enum blk_eh_timer_return (*timeout)(struct request *); |
320ae51f | 593 | |
d386732b AA |
594 | /** |
595 | * @poll: Called to poll for completion of a specific tag. | |
05229bee | 596 | */ |
5a72e899 | 597 | int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); |
05229bee | 598 | |
d386732b AA |
599 | /** |
600 | * @complete: Mark the request as complete. | |
601 | */ | |
0516c2f6 | 602 | void (*complete)(struct request *); |
30a91cb4 | 603 | |
d386732b AA |
604 | /** |
605 | * @init_hctx: Called when the block layer side of a hardware queue has | |
606 | * been set up, allowing the driver to allocate/init matching | |
607 | * structures. | |
320ae51f | 608 | */ |
0516c2f6 | 609 | int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); |
d386732b AA |
610 | /** |
611 | * @exit_hctx: Ditto for exit/teardown. | |
612 | */ | |
0516c2f6 | 613 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
e9b267d9 | 614 | |
d386732b AA |
615 | /** |
616 | * @init_request: Called for every command allocated by the block layer | |
617 | * to allow the driver to set up driver specific data. | |
f70ced09 ML |
618 | * |
619 | * A tag greater than or equal to queue_depth is used for setting up a | 
620 | * flush request. | 
e9b267d9 | 621 | */ |
0516c2f6 DW |
622 | int (*init_request)(struct blk_mq_tag_set *set, struct request *, |
623 | unsigned int, unsigned int); | |
d386732b AA |
624 | /** |
625 | * @exit_request: Ditto for exit/teardown. | |
626 | */ | |
0516c2f6 DW |
627 | void (*exit_request)(struct blk_mq_tag_set *set, struct request *, |
628 | unsigned int); | |
d386732b | 629 | |
d386732b AA |
630 | /** |
631 | * @cleanup_rq: Called before freeing one request which isn't completed | |
632 | * yet, and usually for freeing the driver private data. | |
226b4fc7 | 633 | */ |
0516c2f6 | 634 | void (*cleanup_rq)(struct request *); |
226b4fc7 | 635 | |
d386732b AA |
636 | /** |
637 | * @busy: If set, returns whether or not this queue currently is busy. | |
9ba20527 | 638 | */ |
0516c2f6 | 639 | bool (*busy)(struct request_queue *); |
9ba20527 | 640 | |
d386732b AA |
641 | /** |
642 | * @map_queues: This allows drivers to specify their own queue mapping by | 
643 | * overriding the setup-time function that builds the mq_map. | |
644 | */ | |
a4e1d0b7 | 645 | void (*map_queues)(struct blk_mq_tag_set *set); |
2836ee4b BVA |
646 | |
647 | #ifdef CONFIG_BLK_DEBUG_FS | |
d386732b AA |
648 | /** |
649 | * @show_rq: Used by the debugfs implementation to show driver-specific | |
2836ee4b BVA |
650 | * information about a request. |
651 | */ | |
652 | void (*show_rq)(struct seq_file *m, struct request *rq); | |
653 | #endif | |
320ae51f JA |
654 | }; |
655 | ||
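A condensed, hypothetical skeleton showing how the callbacks above typically fit together; only ->queue_rq is mandatory, and every my_drv_* helper, type, and field is an assumption made for the sketch.

```c
static blk_status_t my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct my_drv_queue *hwq = hctx->driver_data;	/* set in ->init_hctx() */
	struct request *rq = bd->rq;

	if (!my_drv_hw_has_room(hwq))
		return BLK_STS_RESOURCE;	/* core re-runs the queue later */

	blk_mq_start_request(rq);		/* arm the timeout, mark in-flight */
	my_drv_issue(hwq, rq, bd->last);	/* bd->last may ring the doorbell */
	return BLK_STS_OK;
}

/* The IRQ handler calls blk_mq_complete_request(rq), which ends up here. */
static void my_drv_complete(struct request *rq)
{
	struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->status);
}

static const struct blk_mq_ops my_drv_mq_ops = {
	.queue_rq	= my_drv_queue_rq,
	.complete	= my_drv_complete,
	.timeout	= my_drv_timeout,	/* see the timeout sketch above */
};
```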
656 | enum { | |
320ae51f | 657 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
51db1c37 | 658 | BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, |
bf0beec0 ML |
659 | /* |
660 | * Set when this device requires an underlying blk-mq device for | 
661 | * completing IO: | |
662 | */ | |
663 | BLK_MQ_F_STACKING = 1 << 2, | |
32bc15af | 664 | BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, |
1b792f2f | 665 | BLK_MQ_F_BLOCKING = 1 << 5, |
90b71980 | 666 | /* Do not allow an I/O scheduler to be configured. */ |
d3484991 | 667 | BLK_MQ_F_NO_SCHED = 1 << 6, |
90b71980 BVA |
668 | /* |
669 | * Select 'none' during queue registration in case of a single hwq | |
670 | * or shared hwqs instead of 'mq-deadline'. | |
671 | */ | |
672 | BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7, | |
24391c0d SL |
673 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
674 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, | |
320ae51f | 675 | |
5d12f905 | 676 | BLK_MQ_S_STOPPED = 0, |
0d2602ca | 677 | BLK_MQ_S_TAG_ACTIVE = 1, |
bd166ef1 | 678 | BLK_MQ_S_SCHED_RESTART = 2, |
320ae51f | 679 | |
bf0beec0 ML |
680 | /* hw queue is inactive after all its CPUs become offline */ |
681 | BLK_MQ_S_INACTIVE = 3, | |
682 | ||
a4391c64 | 683 | BLK_MQ_MAX_DEPTH = 10240, |
506e931f JA |
684 | |
685 | BLK_MQ_CPU_WORK_BATCH = 8, | |
320ae51f | 686 | }; |
24391c0d SL |
687 | #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ |
688 | ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ | |
689 | ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) | |
690 | #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ | |
691 | ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ | |
692 | << BLK_MQ_F_ALLOC_POLICY_START_BIT) | |
320ae51f | 693 | |
e155b0c2 JG |
694 | #define BLK_MQ_NO_HCTX_IDX (-1U) |
695 | ||
4dcc4874 CH |
696 | struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, |
697 | struct lock_class_key *lkclass); | |
b461dfc4 CH |
698 | #define blk_mq_alloc_disk(set, queuedata) \ |
699 | ({ \ | |
700 | static struct lock_class_key __key; \ | |
b461dfc4 | 701 | \ |
4dcc4874 | 702 | __blk_mq_alloc_disk(set, queuedata, &__key); \ |
b461dfc4 | 703 | }) |
6f8191fd CH |
704 | struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, |
705 | struct lock_class_key *lkclass); | |
24d2f903 | 706 | struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); |
26a9750a CH |
707 | int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, |
708 | struct request_queue *q); | |
6f8191fd | 709 | void blk_mq_destroy_queue(struct request_queue *); |
320ae51f | 710 | |
24d2f903 | 711 | int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); |
cdb14e0f CH |
712 | int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, |
713 | const struct blk_mq_ops *ops, unsigned int queue_depth, | |
714 | unsigned int set_flags); | |
24d2f903 CH |
715 | void blk_mq_free_tag_set(struct blk_mq_tag_set *set); |
716 | ||
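A hedged end-to-end sketch of the allocation calls above: fill a tag set, allocate it, create a gendisk on top of it, and tear everything down on error. The my_drv_* types, the capacity, and the disk name are invented for illustration; for simple single-queue devices blk_mq_alloc_sq_tag_set() can replace the manual fill.

```c
static struct blk_mq_tag_set my_drv_tag_set;

static int my_drv_probe(struct my_drv *drv)
{
	struct gendisk *disk;
	int ret;

	memset(&my_drv_tag_set, 0, sizeof(my_drv_tag_set));
	my_drv_tag_set.ops		= &my_drv_mq_ops;	/* see the ops sketch */
	my_drv_tag_set.nr_hw_queues	= 1;
	my_drv_tag_set.queue_depth	= BLKDEV_DEFAULT_RQ;
	my_drv_tag_set.numa_node	= NUMA_NO_NODE;
	my_drv_tag_set.cmd_size		= sizeof(struct my_drv_cmd);	/* per-rq PDU */
	my_drv_tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
	my_drv_tag_set.driver_data	= drv;

	ret = blk_mq_alloc_tag_set(&my_drv_tag_set);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&my_drv_tag_set, drv);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_free_tag_set;
	}

	disk->fops = &my_drv_bdops;		/* hypothetical block_device_operations */
	snprintf(disk->disk_name, DISK_NAME_LEN, "mydrv0");
	set_capacity(disk, drv->nr_sectors);

	ret = add_disk(disk);
	if (ret)
		goto out_put_disk;
	return 0;

out_put_disk:
	put_disk(disk);
out_free_tag_set:
	blk_mq_free_tag_set(&my_drv_tag_set);
	return ret;
}
```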
320ae51f | 717 | void blk_mq_free_request(struct request *rq); |
6f3b0e8b | 718 | |
3c94d83c | 719 | bool blk_mq_queue_inflight(struct request_queue *q); |
ae879912 | 720 | |
6f3b0e8b | 721 | enum { |
9a95e4ef BVA |
722 | /* return when out of requests */ |
723 | BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), | |
724 | /* allocate from reserved pool */ | |
725 | BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), | |
0854bcdc BVA |
726 | /* set RQF_PM */ |
727 | BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), | |
6f3b0e8b CH |
728 | }; |
729 | ||
16458cf3 | 730 | struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, |
9a95e4ef | 731 | blk_mq_req_flags_t flags); |
cd6ce148 | 732 | struct request *blk_mq_alloc_request_hctx(struct request_queue *q, |
16458cf3 | 733 | blk_opf_t opf, blk_mq_req_flags_t flags, |
9a95e4ef | 734 | unsigned int hctx_idx); |
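A small sketch of the allocation flags and functions above being used to issue a synchronous, driver-internal command; REQ_OP_DRV_IN is one valid passthrough opcode, and the actual PDU setup is left out.

```c
static int my_drv_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	/* BLK_MQ_REQ_NOWAIT: fail instead of sleeping for a free tag. */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* A real driver would fill the PDU here, see blk_mq_rq_to_pdu(). */

	status = blk_execute_rq(rq, false);	/* false: insert at the tail */

	blk_mq_free_request(rq);
	return blk_status_to_errno(status);
}
```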
e028f167 JA |
735 | |
736 | /* | |
737 | * Tag address space map. | |
738 | */ | |
739 | struct blk_mq_tags { | |
740 | unsigned int nr_tags; | |
741 | unsigned int nr_reserved_tags; | |
742 | ||
743 | atomic_t active_queues; | |
744 | ||
745 | struct sbitmap_queue bitmap_tags; | |
746 | struct sbitmap_queue breserved_tags; | |
747 | ||
748 | struct request **rqs; | |
749 | struct request **static_rqs; | |
750 | struct list_head page_list; | |
751 | ||
752 | /* | |
753 | * used to clear request reference in rqs[] before freeing one | |
754 | * request pool | |
755 | */ | |
756 | spinlock_t lock; | |
757 | }; | |
758 | ||
759 | static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, | |
760 | unsigned int tag) | |
761 | { | |
762 | if (tag < tags->nr_tags) { | |
763 | prefetch(tags->rqs[tag]); | |
764 | return tags->rqs[tag]; | |
765 | } | |
766 | ||
767 | return NULL; | |
768 | } | |
320ae51f | 769 | |
205fb5f5 BVA |
770 | enum { |
771 | BLK_MQ_UNIQUE_TAG_BITS = 16, | |
772 | BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, | |
773 | }; | |
774 | ||
775 | u32 blk_mq_unique_tag(struct request *rq); | |
776 | ||
777 | static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) | |
778 | { | |
779 | return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; | |
780 | } | |
781 | ||
782 | static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | |
783 | { | |
784 | return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; | |
785 | } | |
786 | ||
27a46989 PB |
787 | /** |
788 | * blk_mq_rq_state() - read the current MQ_RQ_* state of a request | |
789 | * @rq: target request. | |
790 | */ | |
791 | static inline enum mq_rq_state blk_mq_rq_state(struct request *rq) | |
792 | { | |
793 | return READ_ONCE(rq->state); | |
794 | } | |
795 | ||
796 | static inline int blk_mq_request_started(struct request *rq) | |
797 | { | |
798 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; | |
799 | } | |
800 | ||
801 | static inline int blk_mq_request_completed(struct request *rq) | |
802 | { | |
803 | return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; | |
804 | } | |
320ae51f | 805 | |
83fba8c8 CL |
806 | /* |
807 | * | |
808 | * Set the state to complete when completing a request from inside ->queue_rq. | |
809 | * This is used by drivers that want to ensure special complete actions that | |
810 | * need access to the request are called on failure, e.g. by nvme for | |
811 | * multipathing. | |
812 | */ | |
813 | static inline void blk_mq_set_request_complete(struct request *rq) | |
814 | { | |
815 | WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); | |
816 | } | |
817 | ||
e8dc17e2 SAS |
818 | /* |
819 | * Complete the request directly instead of deferring it to softirq or | |
820 | * completing it on another CPU. Useful in preemptible context rather than in an interrupt. | 
821 | */ | |
822 | static inline void blk_mq_complete_request_direct(struct request *rq, | |
823 | void (*complete)(struct request *rq)) | |
824 | { | |
825 | WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); | |
826 | complete(rq); | |
827 | } | |
828 | ||
e2490073 | 829 | void blk_mq_start_request(struct request *rq); |
2a842aca CH |
830 | void blk_mq_end_request(struct request *rq, blk_status_t error); |
831 | void __blk_mq_end_request(struct request *rq, blk_status_t error); | |
f794f335 JA |
832 | void blk_mq_end_request_batch(struct io_comp_batch *ib); |
833 | ||
834 | /* | |
835 | * Only need start/end time stamping if we have iostat or | |
836 | * blk stats enabled, or if an IO scheduler is in use. | 
837 | */ | |
838 | static inline bool blk_mq_need_time_stamp(struct request *rq) | |
839 | { | |
840 | return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); | |
841 | } | |
842 | ||
99e48cd6 JG |
843 | static inline bool blk_mq_is_reserved_rq(struct request *rq) |
844 | { | |
845 | return rq->rq_flags & RQF_RESV; | |
846 | } | |
847 | ||
f794f335 JA |
848 | /* |
849 | * Batched completions only work when there is no I/O error and no special | |
850 | * ->end_io handler. | |
851 | */ | |
852 | static inline bool blk_mq_add_to_batch(struct request *req, | |
853 | struct io_comp_batch *iob, int ioerror, | |
854 | void (*complete)(struct io_comp_batch *)) | |
855 | { | |
ab3e1d3b | 856 | if (!iob || (req->rq_flags & RQF_ELV) || ioerror) |
f794f335 | 857 | return false; |
ab3e1d3b | 858 | |
f794f335 JA |
859 | if (!iob->complete) |
860 | iob->complete = complete; | |
861 | else if (iob->complete != complete) | |
862 | return false; | |
863 | iob->need_ts |= blk_mq_need_time_stamp(req); | |
864 | rq_list_add(&iob->req_list, req); | |
865 | return true; | |
866 | } | |
320ae51f | 867 | |
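As a sketch of how blk_mq_add_to_batch() is meant to be used, here is a hypothetical ->poll() handler that batches completions and falls back to the regular path when batching is refused; all my_drv_* helpers are assumptions.

```c
static int my_drv_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct my_drv_queue *hwq = hctx->driver_data;
	struct request *rq;
	int found = 0;

	while ((rq = my_drv_next_completed(hwq))) {
		found++;
		/* Batch if possible; otherwise complete this request directly. */
		if (!blk_mq_add_to_batch(rq, iob, my_drv_rq_error(rq),
					 my_drv_complete_batch))
			blk_mq_complete_request(rq);
	}
	return found;
}
```

The callback registered here is invoked later with the accumulated batch and typically finishes it with blk_mq_end_request_batch().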
2b053aca | 868 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); |
6fca6a61 | 869 | void blk_mq_kick_requeue_list(struct request_queue *q); |
2849450a | 870 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
15f73f5b | 871 | void blk_mq_complete_request(struct request *rq); |
40d09b53 | 872 | bool blk_mq_complete_request_remote(struct request *rq); |
320ae51f JA |
873 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
874 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | |
280d45f6 | 875 | void blk_mq_stop_hw_queues(struct request_queue *q); |
2f268556 | 876 | void blk_mq_start_hw_queues(struct request_queue *q); |
ae911c5e | 877 | void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
1b4a3258 | 878 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
97e01209 | 879 | void blk_mq_quiesce_queue(struct request_queue *q); |
9ef4d020 | 880 | void blk_mq_wait_quiesce_done(struct request_queue *q); |
e4e73913 | 881 | void blk_mq_unquiesce_queue(struct request_queue *q); |
7587a5ae | 882 | void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
626fb735 | 883 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
b94ec296 | 884 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
b9151e7b | 885 | void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs); |
e0489487 SG |
886 | void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, |
887 | busy_tag_iter_fn *fn, void *priv); | |
f9934a80 | 888 | void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); |
c761d96b | 889 | void blk_mq_freeze_queue(struct request_queue *q); |
b4c6a028 | 890 | void blk_mq_unfreeze_queue(struct request_queue *q); |
1671d522 | 891 | void blk_freeze_queue_start(struct request_queue *q); |
6bae363e | 892 | void blk_mq_freeze_queue_wait(struct request_queue *q); |
f91328c4 KB |
893 | int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, |
894 | unsigned long timeout); | |
320ae51f | 895 | |
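A brief sketch of the usual way the quiesce and freeze APIs above are paired; drv->queue is a hypothetical request_queue pointer owned by the driver.

```c
/* Stop new ->queue_rq() invocations while reprogramming the hardware. */
static void my_drv_reconfigure(struct my_drv *drv)
{
	blk_mq_quiesce_queue(drv->queue);
	/* ... swap rings, rewrite doorbells, update driver state ... */
	blk_mq_unquiesce_queue(drv->queue);
}

/* Wait until every in-flight request has completed, e.g. before suspend. */
static void my_drv_suspend(struct my_drv *drv)
{
	blk_mq_freeze_queue(drv->queue);
	/* ... the device is idle here ... */
	blk_mq_unfreeze_queue(drv->queue);
}
```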
a4e1d0b7 | 896 | void blk_mq_map_queues(struct blk_mq_queue_map *qmap); |
868f2f0b KB |
897 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); |
898 | ||
852ec809 | 899 | void blk_mq_quiesce_queue_nowait(struct request_queue *q); |
4f084b41 | 900 | |
9cf2bab6 JA |
901 | unsigned int blk_mq_rq_cpu(struct request *rq); |
902 | ||
15f73f5b CH |
903 | bool __blk_should_fake_timeout(struct request_queue *q); |
904 | static inline bool blk_should_fake_timeout(struct request_queue *q) | |
905 | { | |
906 | if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && | |
907 | test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) | |
908 | return __blk_should_fake_timeout(q); | |
909 | return false; | |
910 | } | |
911 | ||
d386732b AA |
912 | /** |
913 | * blk_mq_rq_from_pdu - cast a PDU to a request | |
914 | * @pdu: the PDU (Protocol Data Unit) to be cast | 
915 | * | |
916 | * Return: request | |
917 | * | |
320ae51f | 918 | * Driver command data is immediately after the request. So subtract request |
d386732b | 919 | * size to get back to the original request. |
320ae51f JA |
920 | */ |
921 | static inline struct request *blk_mq_rq_from_pdu(void *pdu) | |
922 | { | |
923 | return pdu - sizeof(struct request); | |
924 | } | |
d386732b AA |
925 | |
926 | /** | |
927 | * blk_mq_rq_to_pdu - cast a request to a PDU | |
928 | * @rq: the request to be cast | 
929 | * | |
930 | * Return: pointer to the PDU | |
931 | * | |
932 | * Driver command data is immediately after the request, so add the request | 
933 | * size to get the PDU. | 
934 | */ | |
320ae51f JA |
935 | static inline void *blk_mq_rq_to_pdu(struct request *rq) |
936 | { | |
2963e3f7 | 937 | return rq + 1; |
320ae51f JA |
938 | } |
939 | ||
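A hypothetical illustration of the PDU layout these two helpers rely on: with blk_mq_tag_set.cmd_size = sizeof(struct my_drv_cmd), the driver data sits directly behind each struct request.

```c
struct my_drv_cmd {			/* hypothetical per-request data */
	dma_addr_t	dma_handle;
	blk_status_t	status;
};

static void my_drv_prepare(struct request *rq)
{
	struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* request -> PDU */

	cmd->status = BLK_STS_OK;
}

static void my_drv_hw_irq_done(struct my_drv_cmd *cmd)
{
	blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));	/* PDU -> request */
}
```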
320ae51f | 940 | #define queue_for_each_hw_ctx(q, hctx, i) \ |
4e5cc99e | 941 | xa_for_each(&(q)->hctx_table, (i), (hctx)) |
320ae51f | 942 | |
320ae51f | 943 | #define hctx_for_each_ctx(hctx, ctx, i) \ |
0d0b7d42 JA |
944 | for ((i) = 0; (i) < (hctx)->nr_ctx && \ |
945 | ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) | |
320ae51f | 946 | |
226b4fc7 ML |
947 | static inline void blk_mq_cleanup_rq(struct request *rq) |
948 | { | |
949 | if (rq->q->mq_ops->cleanup_rq) | |
950 | rq->q->mq_ops->cleanup_rq(rq); | |
951 | } | |
952 | ||
53ffabfd CK |
953 | static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, |
954 | unsigned int nr_segs) | |
955 | { | |
956 | rq->nr_phys_segments = nr_segs; | |
957 | rq->__data_len = bio->bi_iter.bi_size; | |
958 | rq->bio = rq->biotail = bio; | |
959 | rq->ioprio = bio_prio(bio); | |
53ffabfd CK |
960 | } |
961 | ||
fb01a293 ML |
962 | void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, |
963 | struct lock_class_key *key); | |
8cf7961d | 964 | |
24b83deb CH |
965 | static inline bool rq_is_sync(struct request *rq) |
966 | { | |
967 | return op_is_sync(rq->cmd_flags); | |
968 | } | |
969 | ||
970 | void blk_rq_init(struct request_queue *q, struct request *rq); | |
24b83deb CH |
971 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
972 | struct bio_set *bs, gfp_t gfp_mask, | |
973 | int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); | |
974 | void blk_rq_unprep_clone(struct request *rq); | |
28db4711 | 975 | blk_status_t blk_insert_cloned_request(struct request *rq); |
24b83deb CH |
976 | |
977 | struct rq_map_data { | |
978 | struct page **pages; | |
24b83deb | 979 | unsigned long offset; |
f5d632d1 JA |
980 | unsigned short page_order; |
981 | unsigned short nr_entries; | |
982 | bool null_mapped; | |
983 | bool from_user; | |
24b83deb CH |
984 | }; |
985 | ||
986 | int blk_rq_map_user(struct request_queue *, struct request *, | |
987 | struct rq_map_data *, void __user *, unsigned long, gfp_t); | |
988 | int blk_rq_map_user_iov(struct request_queue *, struct request *, | |
989 | struct rq_map_data *, const struct iov_iter *, gfp_t); | |
990 | int blk_rq_unmap_user(struct bio *); | |
991 | int blk_rq_map_kern(struct request_queue *, struct request *, void *, | |
992 | unsigned int, gfp_t); | |
993 | int blk_rq_append_bio(struct request *rq, struct bio *bio); | |
e2e53086 | 994 | void blk_execute_rq_nowait(struct request *rq, bool at_head); |
b84ba30b | 995 | blk_status_t blk_execute_rq(struct request *rq, bool at_head); |
c6e99ea4 | 996 | bool blk_rq_is_poll(struct request *rq); |
24b83deb CH |
997 | |
998 | struct req_iterator { | |
999 | struct bvec_iter iter; | |
1000 | struct bio *bio; | |
1001 | }; | |
1002 | ||
1003 | #define __rq_for_each_bio(_bio, rq) \ | |
1004 | if ((rq->bio)) \ | |
1005 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) | |
1006 | ||
1007 | #define rq_for_each_segment(bvl, _rq, _iter) \ | |
1008 | __rq_for_each_bio(_iter.bio, _rq) \ | |
1009 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) | |
1010 | ||
1011 | #define rq_for_each_bvec(bvl, _rq, _iter) \ | |
1012 | __rq_for_each_bio(_iter.bio, _rq) \ | |
1013 | bio_for_each_bvec(bvl, _iter.bio, _iter.iter) | |
1014 | ||
1015 | #define rq_iter_last(bvec, _iter) \ | |
1016 | (_iter.bio->bi_next == NULL && \ | |
1017 | bio_iter_last(bvec, _iter.iter)) | |
1018 | ||
1019 | /* | |
1020 | * blk_rq_pos() : the current sector | |
1021 | * blk_rq_bytes() : bytes left in the entire request | |
1022 | * blk_rq_cur_bytes() : bytes left in the current segment | |
24b83deb CH |
1023 | * blk_rq_sectors() : sectors left in the entire request |
1024 | * blk_rq_cur_sectors() : sectors left in the current segment | |
1025 | * blk_rq_stats_sectors() : sectors of the entire request used for stats | |
1026 | */ | |
1027 | static inline sector_t blk_rq_pos(const struct request *rq) | |
1028 | { | |
1029 | return rq->__sector; | |
1030 | } | |
1031 | ||
1032 | static inline unsigned int blk_rq_bytes(const struct request *rq) | |
1033 | { | |
1034 | return rq->__data_len; | |
1035 | } | |
1036 | ||
1037 | static inline int blk_rq_cur_bytes(const struct request *rq) | |
1038 | { | |
b6559d8f CH |
1039 | if (!rq->bio) |
1040 | return 0; | |
1041 | if (!bio_has_data(rq->bio)) /* dataless requests such as discard */ | |
1042 | return rq->bio->bi_iter.bi_size; | |
1043 | return bio_iovec(rq->bio).bv_len; | |
24b83deb CH |
1044 | } |
1045 | ||
24b83deb CH |
1046 | static inline unsigned int blk_rq_sectors(const struct request *rq) |
1047 | { | |
1048 | return blk_rq_bytes(rq) >> SECTOR_SHIFT; | |
1049 | } | |
1050 | ||
1051 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |
1052 | { | |
1053 | return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; | |
1054 | } | |
1055 | ||
1056 | static inline unsigned int blk_rq_stats_sectors(const struct request *rq) | |
1057 | { | |
1058 | return rq->stats_sectors; | |
1059 | } | |
1060 | ||
1061 | /* | |
1062 | * Some commands like WRITE SAME have a payload or data transfer size which | |
1063 | * is different from the size of the request. Any driver that supports such | |
1064 | * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to | |
1065 | * calculate the data transfer size. | |
1066 | */ | |
1067 | static inline unsigned int blk_rq_payload_bytes(struct request *rq) | |
1068 | { | |
1069 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1070 | return rq->special_vec.bv_len; | |
1071 | return blk_rq_bytes(rq); | |
1072 | } | |
1073 | ||
1074 | /* | |
1075 | * Return the first full biovec in the request. The caller needs to check that | |
1076 | * there is at least one bvec before calling this helper. | 
1077 | */ | |
1078 | static inline struct bio_vec req_bvec(struct request *rq) | |
1079 | { | |
1080 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1081 | return rq->special_vec; | |
1082 | return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); | |
1083 | } | |
1084 | ||
1085 | static inline unsigned int blk_rq_count_bios(struct request *rq) | |
1086 | { | |
1087 | unsigned int nr_bios = 0; | |
1088 | struct bio *bio; | |
1089 | ||
1090 | __rq_for_each_bio(bio, rq) | |
1091 | nr_bios++; | |
1092 | ||
1093 | return nr_bios; | |
1094 | } | |
1095 | ||
1096 | void blk_steal_bios(struct bio_list *list, struct request *rq); | |
1097 | ||
1098 | /* | |
1099 | * Request completion related functions. | |
1100 | * | |
1101 | * blk_update_request() completes the given number of bytes and updates | 
1102 | * the request without completing it. | |
1103 | */ | |
1104 | bool blk_update_request(struct request *rq, blk_status_t error, | |
1105 | unsigned int nr_bytes); | |
1106 | void blk_abort_request(struct request *); | |
1107 | ||
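A hedged sketch of the partial-completion pattern blk_update_request() enables (similar in spirit to what SCSI does): finish the bytes that completed, end the request if nothing is left, otherwise requeue the remainder. my_drv_complete_bytes is a hypothetical helper.

```c
static void my_drv_complete_bytes(struct request *rq, unsigned int good_bytes)
{
	/* blk_update_request() returns false once the whole request is done. */
	if (!blk_update_request(rq, BLK_STS_OK, good_bytes)) {
		__blk_mq_end_request(rq, BLK_STS_OK);
		return;
	}

	/* Bytes remain: push the already-updated request back for dispatch. */
	blk_mq_requeue_request(rq, true);
}
```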
1108 | /* | |
1109 | * Number of physical segments as sent to the device. | |
1110 | * | |
1111 | * Normally this is the number of discontiguous data segments sent by the | |
1112 | * submitter. But for data-less commands like discard we might have no | 
1113 | * actual data segments submitted, but the driver might have to add its | 
1114 | * own special payload. In that case we still return 1 here so that this | |
1115 | * special payload will be mapped. | |
1116 | */ | |
1117 | static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) | |
1118 | { | |
1119 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | |
1120 | return 1; | |
1121 | return rq->nr_phys_segments; | |
1122 | } | |
1123 | ||
1124 | /* | |
1125 | * Number of discard segments (or ranges) the driver needs to fill in. | |
1126 | * Each discard bio merged into a request is counted as one segment. | |
1127 | */ | |
1128 | static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) | |
1129 | { | |
1130 | return max_t(unsigned short, rq->nr_phys_segments, 1); | |
1131 | } | |
1132 | ||
1133 | int __blk_rq_map_sg(struct request_queue *q, struct request *rq, | |
1134 | struct scatterlist *sglist, struct scatterlist **last_sg); | |
1135 | static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |
1136 | struct scatterlist *sglist) | |
1137 | { | |
1138 | struct scatterlist *last_sg = NULL; | |
1139 | ||
1140 | return __blk_rq_map_sg(q, rq, sglist, &last_sg); | |
1141 | } | |
1142 | void blk_dump_rq_flags(struct request *, char *); | |
1143 | ||
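A sketch of the typical ->queue_rq()-time use of blk_rq_map_sg() together with rq_dma_dir(); the my_drv_cmd fields and the preallocated scatterlist are assumptions.

```c
static blk_status_t my_drv_map_data(struct device *dev, struct request *rq,
				    struct my_drv_cmd *cmd)
{
	int nents;

	/* cmd->sgl is assumed to be sized for the queue's max_segments limit. */
	sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(rq));

	nents = blk_rq_map_sg(rq->q, rq, cmd->sgl);
	if (!nents)
		return BLK_STS_IOERR;

	cmd->mapped = dma_map_sg(dev, cmd->sgl, nents, rq_dma_dir(rq));
	if (!cmd->mapped)
		return BLK_STS_RESOURCE;	/* retry once resources free up */

	return BLK_STS_OK;
}
```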
1144 | #ifdef CONFIG_BLK_DEV_ZONED | |
1145 | static inline unsigned int blk_rq_zone_no(struct request *rq) | |
1146 | { | |
d86e716a | 1147 | return disk_zone_no(rq->q->disk, blk_rq_pos(rq)); |
24b83deb CH |
1148 | } |
1149 | ||
1150 | static inline unsigned int blk_rq_zone_is_seq(struct request *rq) | |
1151 | { | |
d86e716a | 1152 | return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq)); |
24b83deb CH |
1153 | } |
1154 | ||
1155 | bool blk_req_needs_zone_write_lock(struct request *rq); | |
1156 | bool blk_req_zone_write_trylock(struct request *rq); | |
1157 | void __blk_req_zone_write_lock(struct request *rq); | |
1158 | void __blk_req_zone_write_unlock(struct request *rq); | |
1159 | ||
1160 | static inline void blk_req_zone_write_lock(struct request *rq) | |
1161 | { | |
1162 | if (blk_req_needs_zone_write_lock(rq)) | |
1163 | __blk_req_zone_write_lock(rq); | |
1164 | } | |
1165 | ||
1166 | static inline void blk_req_zone_write_unlock(struct request *rq) | |
1167 | { | |
1168 | if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) | |
1169 | __blk_req_zone_write_unlock(rq); | |
1170 | } | |
1171 | ||
1172 | static inline bool blk_req_zone_is_write_locked(struct request *rq) | |
1173 | { | |
d86e716a CH |
1174 | return rq->q->disk->seq_zones_wlock && |
1175 | test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock); | |
24b83deb CH |
1176 | } |
1177 | ||
1178 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) | |
1179 | { | |
1180 | if (!blk_req_needs_zone_write_lock(rq)) | |
1181 | return true; | |
1182 | return !blk_req_zone_is_write_locked(rq); | |
1183 | } | |
1184 | #else /* CONFIG_BLK_DEV_ZONED */ | |
1185 | static inline bool blk_req_needs_zone_write_lock(struct request *rq) | |
1186 | { | |
1187 | return false; | |
1188 | } | |
1189 | ||
1190 | static inline void blk_req_zone_write_lock(struct request *rq) | |
1191 | { | |
1192 | } | |
1193 | ||
1194 | static inline void blk_req_zone_write_unlock(struct request *rq) | |
1195 | { | |
1196 | } | |
1197 | static inline bool blk_req_zone_is_write_locked(struct request *rq) | |
1198 | { | |
1199 | return false; | |
1200 | } | |
1201 | ||
1202 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) | |
1203 | { | |
1204 | return true; | |
1205 | } | |
1206 | #endif /* CONFIG_BLK_DEV_ZONED */ | |
1207 | ||
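Finally, a hypothetical sketch of how an I/O scheduler can use the zone write-lock helpers above when picking the next request to dispatch (mq-deadline follows a similar pattern); the unlock side runs from the completion path via blk_req_zone_write_unlock().

```c
static struct request *my_sched_pick_request(struct list_head *fifo)
{
	struct request *rq;

	list_for_each_entry(rq, fifo, queuelist) {
		/* Skip writes whose target zone is locked by another request. */
		if (!blk_req_can_dispatch_to_zone(rq))
			continue;

		blk_req_zone_write_lock(rq);	/* no-op unless a lock is needed */
		list_del_init(&rq->queuelist);
		return rq;
	}
	return NULL;
}
```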
24b83deb | 1208 | #endif /* BLK_MQ_H */ |