/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
/* use an I/O scheduler for this request */
#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/*
 * Look at ->special_vec for the actual data payload instead of the
 * bio chain.
 */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE	= 0,
	MQ_RQ_IN_FLIGHT	= 1,
	MQ_RQ_COMPLETE	= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;		/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and such requests
	 * cannot be inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;		/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to allocate it dynamically.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}

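/*
 * Usage sketch (illustrative only, not part of the kernel API): drain one
 * rq_list into another with the helpers above. Note that rq_list_add()
 * pushes to the head, so the destination ends up in reverse order.
 */
static inline void example_rq_list_splice(struct request **src,
					  struct request **dst)
{
	struct request *rq;

	while ((rq = rq_list_pop(src)) != NULL)
		rq_list_add(dst, rq);
}
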
/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works are left in the batch
	 * before changing to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going to die */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Use as lock when type of the request queue is blocking
 *		   (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * .queue_rq has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of .get_budget also has to
	 * be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tags greater than or equal to queue_depth are used for setting up
	 * flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

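/*
 * Illustrative sketch (not compiled, not kernel API): how a simple
 * single-queue driver might fill in a tag set and allocate a disk with the
 * helpers above. The "example_*" names and struct example_dev are
 * hypothetical; error handling is reduced to the minimum.
 */
#if 0
static int example_add_disk(struct example_dev *dev)
{
	struct gendisk *disk;
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &example_mq_ops;	/* hypothetical blk_mq_ops */
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = sizeof(struct example_cmd);
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(disk);
	}

	/* set disk->fops, capacity etc. here, then add_disk(disk) */
	dev->disk = disk;
	return 0;
}
#endif
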
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

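/*
 * Usage sketch (illustrative only): a driver that handed a unique tag to its
 * hardware can split it back into hardware queue index and per-queue tag with
 * the helpers above.
 */
static inline void example_decode_unique_tag(u32 unique_tag, u16 *hwq,
					     u16 *tag)
{
	*hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	*tag = blk_mq_unique_tag_to_tag(unique_tag);
}
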
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context rather than in
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * passthrough io doesn't use iostat accounting, cgroup stats
	 * and io scheduler functionalities.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end requests allocated from
	 * sched tags.
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

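/*
 * Usage sketch (illustrative, not kernel API): a driver completion path can
 * try to batch a finished request and only fall back to an immediate
 * blk_mq_end_request() when batching is not possible. "example_*" names are
 * hypothetical.
 */
static inline void example_complete_batch(struct io_comp_batch *iob)
{
	/* per-request driver teardown could go here, then: */
	blk_mq_end_request_batch(iob);
}

static inline void example_complete_one(struct request *req,
					struct io_comp_batch *iob)
{
	if (!blk_mq_add_to_batch(req, iob, 0, example_complete_batch))
		blk_mq_end_request(req, BLK_STS_OK);
}
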
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

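/*
 * Usage sketch (illustrative only): drivers commonly freeze a queue around a
 * change that must not race with new I/O, then unfreeze it. The callback
 * here stands in for whatever driver-specific update is being made.
 */
static inline void example_update_under_freeze(struct request_queue *q,
		void (*update)(struct request_queue *q))
{
	blk_mq_freeze_queue(q);
	update(q);
	blk_mq_unfreeze_queue(q);
}
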
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

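/*
 * Usage sketch (illustrative, not compiled, not kernel API): with ->cmd_size
 * set to sizeof(struct example_cmd) in the tag set, every request carries an
 * example_cmd PDU right behind it, reachable via blk_mq_rq_to_pdu(). The
 * "example_*" names are hypothetical.
 */
#if 0
struct example_cmd {
	u32 opcode;
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->opcode = req_op(rq);	/* fill the driver-owned PDU */
	blk_mq_start_request(rq);
	/* submit cmd to hardware; complete later via blk_mq_end_request() */
	return BLK_STS_OK;
}
#endif
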
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

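/*
 * Usage sketch (illustrative only): count the bytes of data in a request by
 * walking its segments with rq_for_each_segment().
 */
static inline unsigned int example_rq_data_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}
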
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request. The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

/**
 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
 * @rq: Request to examine.
 *
 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
 */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return op_needs_zoned_write_locking(req_op(rq)) &&
	       blk_rq_zone_is_seq(rq);
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return false;
}

static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

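/*
 * Illustrative dispatch-time pattern (not kernel API): only send a zoned
 * write when its target zone is not already write locked, and take the zone
 * write lock around the dispatch. Both helpers exist in the zoned and
 * non-zoned configurations above.
 */
static inline bool example_prep_zoned_dispatch(struct request *rq)
{
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;
	blk_req_zone_write_lock(rq);
	return true;
}
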
#endif /* BLK_MQ_H */