/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV			((__force req_flags_t)(1 << 22))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value
	 * as blk_rq_sectors(rq), except that it is never zeroed
	 * by completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(req_op(rq));
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}

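/*
 * Example (illustrative sketch only, not part of this header; the
 * example_complete_all() helper is made up for the example): draining a
 * singly linked rq_list that a driver built with rq_list_add(), using
 * rq_list_pop() so each request is unlinked before it is completed:
 *
 *	static void example_complete_all(struct request *rq_list)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = rq_list_pop(&rq_list)))
 *			blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 */
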
/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works are left in the batch
	 * before changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going to die */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

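/*
 * Example (illustrative sketch only; poll_queues is an assumed driver
 * variable and example_map_queues() is a made-up name): a ->map_queues
 * callback that splits the hardware queues between the default and poll
 * types by setting @queue_offset before letting blk_mq_map_queues()
 * spread the CPUs over each map:
 *
 *	static void example_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
 *		struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];
 *
 *		def->nr_queues = set->nr_hw_queues - poll_queues;
 *		def->queue_offset = 0;
 *		poll->nr_queues = poll_queues;
 *		poll->queue_offset = def->nr_queues;
 *		blk_mq_map_queues(def);
 *		blk_mq_map_queues(poll);
 *	}
 */
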
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Use as lock when type of the request queue is blocking
 *		   (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve budget before queueing a request; once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. Also we have to handle the failure case of
	 * .get_budget to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up the
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

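/*
 * Example (illustrative sketch only; example_hw_has_room() and
 * example_submit_to_hw() are hypothetical driver helpers, not kernel APIs):
 * a minimal ->queue_rq implementation and the ops table a driver might
 * register with its tag set:
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (!example_hw_has_room(hctx->driver_data))
 *			return BLK_STS_RESOURCE;
 *
 *		blk_mq_start_request(rq);
 *		example_submit_to_hw(hctx->driver_data, rq);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops example_mq_ops = {
 *		.queue_rq = example_queue_rq,
 *	};
 */
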
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)			\
({								\
	static struct lock_class_key __key;			\
								\
	__blk_mq_alloc_disk(set, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

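/*
 * Example (illustrative sketch only; example_mq_ops, example_dev and the
 * queue depth of 128 are assumptions made for the example): typical driver
 * setup of a single-map tag set followed by gendisk allocation:
 *
 *	struct blk_mq_tag_set *set = &example_dev->tag_set;
 *	struct gendisk *disk;
 *	int ret;
 *
 *	ret = blk_mq_alloc_sq_tag_set(set, &example_mq_ops, 128,
 *				      BLK_MQ_F_SHOULD_MERGE);
 *	if (ret)
 *		return ret;
 *
 *	disk = blk_mq_alloc_disk(set, example_dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(disk);
 *	}
 */
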
void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

744 * Tag address space map.
745 */
746struct blk_mq_tags {
747 unsigned int nr_tags;
748 unsigned int nr_reserved_tags;
749
750 atomic_t active_queues;
751
752 struct sbitmap_queue bitmap_tags;
753 struct sbitmap_queue breserved_tags;
754
755 struct request **rqs;
756 struct request **static_rqs;
757 struct list_head page_list;
758
759 /*
760 * used to clear request reference in rqs[] before freeing one
761 * request pool
762 */
763 spinlock_t lock;
764};
765
766static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
767 unsigned int tag)
768{
769 if (tag < tags->nr_tags) {
770 prefetch(tags->rqs[tag]);
771 return tags->rqs[tag];
772 }
773
774 return NULL;
775}
320ae51f 776
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

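/*
 * Example (illustrative sketch only): a driver that stashed the value of
 * blk_mq_unique_tag() in a hardware completion entry can recover both the
 * hardware queue index and the per-queue tag from it, and look the request
 * back up in the driver tags for that hardware queue:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *
 *	rq = blk_mq_tag_to_rq(set->tags[hwq], tag);
 */
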
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of in
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

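/*
 * Example (illustrative sketch only; example_reap_one_completion() and
 * example_finish_batch() are hypothetical driver helpers): a ->poll
 * implementation that batches completions through the io_comp_batch and
 * falls back to blk_mq_complete_request() when batching is not possible:
 *
 *	static int example_poll(struct blk_mq_hw_ctx *hctx,
 *				struct io_comp_batch *iob)
 *	{
 *		struct request *rq;
 *		int found = 0;
 *
 *		while ((rq = example_reap_one_completion(hctx))) {
 *			found++;
 *			if (!blk_mq_add_to_batch(rq, iob, 0,
 *						 example_finish_batch))
 *				blk_mq_complete_request(rq);
 *		}
 *		return found;
 *	}
 */
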
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

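/*
 * Example (illustrative sketch only; struct example_cmd is an assumed
 * driver-private type): with tag_set.cmd_size = sizeof(struct example_cmd),
 * the per-request driver data lives directly behind the request and the two
 * casts above are inverses of each other:
 *
 *	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *	WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);
 */
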
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

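/*
 * Example (illustrative sketch only; example_copy_segment() is a hypothetical
 * driver helper and assumes the pages are directly addressable): walking
 * every data segment of a request, e.g. to copy it into a device buffer:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		example_copy_segment(dev, page_address(bvec.bv_page) +
 *				     bvec.bv_offset, bvec.bv_len);
 */
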
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for a data-less command like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

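/*
 * Example (illustrative sketch only; EXAMPLE_MAX_SEGMENTS, dma_dev and the
 * error handling policy are assumptions made for the example): mapping a
 * request onto a scatterlist and then onto the device's DMA addresses,
 * typically done from ->queue_rq:
 *
 *	struct scatterlist sgl[EXAMPLE_MAX_SEGMENTS];
 *	int nents;
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	nents = dma_map_sg(dma_dev, sgl, nents, rq_dma_dir(rq));
 */
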
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */