Merge tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux...
[linux-2.6-block.git] / include / linux / blk-mq.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
320ae51f
JA
2#ifndef BLK_MQ_H
3#define BLK_MQ_H
4
5#include <linux/blkdev.h>
88459642 6#include <linux/sbitmap.h>
fb01a293 7#include <linux/lockdep.h>
24b83deb 8#include <linux/scatterlist.h>
e028f167 9#include <linux/prefetch.h>
80bd4a7a 10#include <linux/srcu.h>
44981351 11#include <linux/rw_hint.h>
98e68f67 12#include <linux/rwsem.h>
320ae51f
JA
13
14struct blk_mq_tags;
f70ced09 15struct blk_flush_queue;
320ae51f 16
24b83deb 17#define BLKDEV_MIN_RQ 4
d2a27964 18#define BLKDEV_DEFAULT_RQ 128
24b83deb 19
de671d61
JA
20enum rq_end_io_ret {
21 RQ_END_IO_NONE,
22 RQ_END_IO_FREE,
23};
24
25typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
24b83deb
CH
26
27/*
28 * request flags */
29typedef __u32 __bitwise req_flags_t;
30
8a47e33f 31/* Keep rqf_name[] in sync with the definitions below */
e7112524 32enum rqf_flags {
5f89154e
JG
33 /* drive already may have started this one */
34 __RQF_STARTED,
35 /* request for flush sequence */
36 __RQF_FLUSH_SEQ,
37 /* merge of different types, fail separately */
38 __RQF_MIXED_MERGE,
39 /* don't call prep for this one */
40 __RQF_DONTPREP,
41 /* use hctx->sched_tags */
42 __RQF_SCHED_TAGS,
43 /* use an I/O scheduler for this request */
44 __RQF_USE_SCHED,
45 /* vaguely specified driver internal error. Ignored by block layer */
46 __RQF_FAILED,
47 /* don't warn about errors */
48 __RQF_QUIET,
49 /* account into disk and partition IO statistics */
50 __RQF_IO_STAT,
51 /* runtime pm request */
52 __RQF_PM,
53 /* on IO scheduler merge hash */
54 __RQF_HASHED,
55 /* track IO completion time */
56 __RQF_STATS,
57 /* Look at ->special_vec for the actual data payload instead of the
58 bio chain. */
59 __RQF_SPECIAL_PAYLOAD,
60 /* request completion needs to be signaled to zone write plugging. */
61 __RQF_ZONE_WRITE_PLUGGING,
62 /* ->timeout has been called, don't expire again */
63 __RQF_TIMED_OUT,
64 __RQF_RESV,
65 __RQF_BITS
66};
67
68#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED))
69#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
70#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
71#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP))
72#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
73#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED))
74#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED))
75#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET))
76#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT))
77#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM))
78#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED))
79#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS))
80#define RQF_SPECIAL_PAYLOAD \
81 ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
82#define RQF_ZONE_WRITE_PLUGGING \
83 ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
84#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT))
85#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV))
24b83deb
CH
86
87/* flags that prevent us from merging requests: */
88#define RQF_NOMERGE_FLAGS \
9a67aa52 89 (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
24b83deb
CH
90
91enum mq_rq_state {
92 MQ_RQ_IDLE = 0,
93 MQ_RQ_IN_FLIGHT = 1,
94 MQ_RQ_COMPLETE = 2,
95};
96
97/*
98 * Try to put the fields that are referenced together in the same cacheline.
99 *
100 * If you modify this structure, make sure to update blk_rq_init() and
101 * especially blk_mq_rq_ctx_init() to take care of the added fields.
102 */
103struct request {
104 struct request_queue *q;
105 struct blk_mq_ctx *mq_ctx;
106 struct blk_mq_hw_ctx *mq_hctx;
107
16458cf3 108 blk_opf_t cmd_flags; /* op and common flags */
24b83deb
CH
109 req_flags_t rq_flags;
110
111 int tag;
112 int internal_tag;
113
b6087629
JA
114 unsigned int timeout;
115
24b83deb
CH
116 /* the following two fields are internal, NEVER access directly */
117 unsigned int __data_len; /* total data len */
118 sector_t __sector; /* sector cursor */
119
120 struct bio *bio;
121 struct bio *biotail;
122
47c122e3
JA
123 union {
124 struct list_head queuelist;
125 struct request *rq_next;
126 };
24b83deb 127
24b83deb
CH
128 struct block_device *part;
129#ifdef CONFIG_BLK_RQ_ALLOC_TIME
130 /* Time that the first bio started allocating this request. */
131 u64 alloc_time_ns;
132#endif
133 /* Time that this request was allocated for this IO. */
134 u64 start_time_ns;
135 /* Time that I/O was submitted to the device. */
136 u64 io_start_time_ns;
137
138#ifdef CONFIG_BLK_WBT
139 unsigned short wbt_flags;
140#endif
141 /*
142 * rq sectors used for blk stats. It has the same value
143 * with blk_rq_sectors(rq), except that it never be zeroed
144 * by completion.
145 */
146 unsigned short stats_sectors;
147
148 /*
149 * Number of scatter-gather DMA addr+len pairs after
150 * physical address coalescing is performed.
151 */
152 unsigned short nr_phys_segments;
24b83deb 153 unsigned short nr_integrity_segments;
24b83deb
CH
154
155#ifdef CONFIG_BLK_INLINE_ENCRYPTION
156 struct bio_crypt_ctx *crypt_ctx;
cb77cb5a 157 struct blk_crypto_keyslot *crypt_keyslot;
24b83deb
CH
158#endif
159
24b83deb 160 enum mq_rq_state state;
0a467d0f 161 atomic_t ref;
24b83deb 162
24b83deb
CH
163 unsigned long deadline;
164
b6087629
JA
165 /*
166 * The hash is used inside the scheduler, and killed once the
167 * request reaches the dispatch list. The ipi_list is only used
168 * to queue the request for softirq completion, which is long
169 * after the request has been unhashed (and even removed from
170 * the dispatch list).
171 */
172 union {
173 struct hlist_node hash; /* merge hash */
174 struct llist_node ipi_list;
175 };
176
177 /*
178 * The rb_node is only used inside the io scheduler, requests
dc8cbb65
JA
179 * are pruned when moved to the dispatch queue. special_vec must
180 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
181 * insert into an IO scheduler.
b6087629
JA
182 */
183 union {
184 struct rb_node rb_node; /* sort/lookup */
185 struct bio_vec special_vec;
b6087629
JA
186 };
187
b6087629
JA
188 /*
189 * Three pointers are available for the IO schedulers, if they need
be4c4278 190 * more they have to dynamically allocate it.
b6087629 191 */
be4c4278
BVA
192 struct {
193 struct io_cq *icq;
194 void *priv[2];
195 } elv;
196
197 struct {
198 unsigned int seq;
be4c4278
BVA
199 rq_end_io_fn *saved_end_io;
200 } flush;
b6087629 201
660e802c 202 u64 fifo_time;
24b83deb
CH
203
204 /*
205 * completion callback.
206 */
207 rq_end_io_fn *end_io;
208 void *end_io_data;
209};
210
2d9b02be
BVA
211static inline enum req_op req_op(const struct request *req)
212{
213 return req->cmd_flags & REQ_OP_MASK;
214}
24b83deb
CH
215
216static inline bool blk_rq_is_passthrough(struct request *rq)
217{
712fd23a 218 return blk_op_is_passthrough(rq->cmd_flags);
24b83deb
CH
219}
220
221static inline unsigned short req_get_ioprio(struct request *req)
222{
6975c1a4
CH
223 if (req->bio)
224 return req->bio->bi_ioprio;
225 return 0;
24b83deb
CH
226}
227
228#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
229
230#define rq_dma_dir(rq) \
231 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
232
a3396b99
CH
233static inline int rq_list_empty(const struct rq_list *rl)
234{
235 return rl->head == NULL;
236}
edce22e1 237
a3396b99
CH
238static inline void rq_list_init(struct rq_list *rl)
239{
240 rl->head = NULL;
241 rl->tail = NULL;
242}
243
244static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
245{
246 rq->rq_next = NULL;
247 if (rl->tail)
248 rl->tail->rq_next = rq;
249 else
250 rl->head = rq;
251 rl->tail = rq;
252}
253
254static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
255{
256 rq->rq_next = rl->head;
257 rl->head = rq;
258 if (!rl->tail)
259 rl->tail = rq;
260}
261
262static inline struct request *rq_list_pop(struct rq_list *rl)
263{
264 struct request *rq = rl->head;
265
266 if (rq) {
267 rl->head = rl->head->rq_next;
268 if (!rl->head)
269 rl->tail = NULL;
270 rq->rq_next = NULL;
271 }
272
273 return rq;
274}
edce22e1 275
a3396b99
CH
276static inline struct request *rq_list_peek(struct rq_list *rl)
277{
278 return rl->head;
279}
edce22e1 280
a3396b99
CH
281#define rq_list_for_each(rl, pos) \
282 for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
3764fd05 283
a3396b99
CH
284#define rq_list_for_each_safe(rl, pos, nxt) \
285 for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \
286 pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
edce22e1 287
b2bed51a
BVA
288/**
289 * enum blk_eh_timer_return - How the timeout handler should proceed
290 * @BLK_EH_DONE: The block driver completed the command or will complete it at
291 * a later time.
292 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
293 * request to complete.
294 */
24b83deb 295enum blk_eh_timer_return {
b2bed51a
BVA
296 BLK_EH_DONE,
297 BLK_EH_RESET_TIMER,
24b83deb
CH
298};
299
fe644072 300/**
d386732b
AA
301 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
302 * block device
fe644072 303 */
320ae51f
JA
304struct blk_mq_hw_ctx {
305 struct {
d386732b 306 /** @lock: Protects the dispatch list. */
320ae51f 307 spinlock_t lock;
d386732b
AA
308 /**
309 * @dispatch: Used for requests that are ready to be
310 * dispatched to the hardware but for some reason (e.g. lack of
311 * resources) could not be sent to the hardware. As soon as the
312 * driver can send new requests, requests at this list will
313 * be sent first for a fairer dispatch.
314 */
320ae51f 315 struct list_head dispatch;
d386732b
AA
316 /**
317 * @state: BLK_MQ_S_* flags. Defines the state of the hw
318 * queue (active, scheduled to restart, stopped).
319 */
320 unsigned long state;
320ae51f
JA
321 } ____cacheline_aligned_in_smp;
322
d386732b
AA
323 /**
324 * @run_work: Used for scheduling a hardware queue run at a later time.
325 */
9f993737 326 struct delayed_work run_work;
d386732b 327 /** @cpumask: Map of available CPUs where this hctx can run. */
e4043dcf 328 cpumask_var_t cpumask;
d386732b
AA
329 /**
330 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
331 * selection from @cpumask.
332 */
506e931f 333 int next_cpu;
d386732b
AA
334 /**
335 * @next_cpu_batch: Counter of how many works left in the batch before
336 * changing to the next CPU.
337 */
506e931f 338 int next_cpu_batch;
320ae51f 339
d386732b
AA
340 /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
341 unsigned long flags;
320ae51f 342
d386732b
AA
343 /**
344 * @sched_data: Pointer owned by the IO scheduler attached to a request
345 * queue. It's up to the IO scheduler how to use this pointer.
346 */
bd166ef1 347 void *sched_data;
d386732b
AA
348 /**
349 * @queue: Pointer to the request queue that owns this hardware context.
350 */
320ae51f 351 struct request_queue *queue;
d386732b 352 /** @fq: Queue of requests that need to perform a flush operation. */
f70ced09 353 struct blk_flush_queue *fq;
320ae51f 354
d386732b
AA
355 /**
356 * @driver_data: Pointer to data owned by the block driver that created
357 * this hctx
358 */
320ae51f
JA
359 void *driver_data;
360
d386732b
AA
361 /**
362 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
363 * pending request in that software queue.
364 */
88459642 365 struct sbitmap ctx_map;
1429d7c9 366
d386732b
AA
367 /**
368 * @dispatch_from: Software queue to be used when no scheduler was
369 * selected.
370 */
b347689f 371 struct blk_mq_ctx *dispatch_from;
d386732b
AA
372 /**
373 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
374 * decide if the hw_queue is busy using Exponential Weighted Moving
375 * Average algorithm.
376 */
6e768717 377 unsigned int dispatch_busy;
b347689f 378
d386732b 379 /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
f31967f0 380 unsigned short type;
d386732b 381 /** @nr_ctx: Number of software queues. */
f31967f0 382 unsigned short nr_ctx;
d386732b 383 /** @ctxs: Array of software queues. */
6e768717 384 struct blk_mq_ctx **ctxs;
4bb659b1 385
d386732b 386 /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
5815839b 387 spinlock_t dispatch_wait_lock;
d386732b
AA
388 /**
389 * @dispatch_wait: Waitqueue to put requests when there is no tag
390 * available at the moment, to wait for another try in the future.
391 */
eb619fdb 392 wait_queue_entry_t dispatch_wait;
d386732b
AA
393
394 /**
395 * @wait_index: Index of next available dispatch_wait queue to insert
396 * requests.
397 */
8537b120 398 atomic_t wait_index;
320ae51f 399
d386732b
AA
400 /**
401 * @tags: Tags owned by the block driver. A tag at this set is only
402 * assigned when a request is dispatched from a hardware queue.
403 */
320ae51f 404 struct blk_mq_tags *tags;
d386732b
AA
405 /**
406 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
407 * scheduler associated with a request queue, a tag is assigned when
408 * that request is allocated. Else, this member is not used.
409 */
bd166ef1 410 struct blk_mq_tags *sched_tags;
320ae51f 411
d386732b 412 /** @numa_node: NUMA node the storage adapter has been connected to. */
320ae51f 413 unsigned int numa_node;
d386732b 414 /** @queue_num: Index of this hardware queue. */
17ded320 415 unsigned int queue_num;
320ae51f 416
d386732b
AA
417 /**
418 * @nr_active: Number of active requests. Only used when a tag set is
419 * shared across request queues.
420 */
0d2602ca
JA
421 atomic_t nr_active;
422
bf0beec0
ML
423 /** @cpuhp_online: List to store request if CPU is going to die */
424 struct hlist_node cpuhp_online;
d386732b 425 /** @cpuhp_dead: List to store request if some CPU die. */
9467f859 426 struct hlist_node cpuhp_dead;
d386732b 427 /** @kobj: Kernel object for sysfs. */
320ae51f 428 struct kobject kobj;
05229bee 429
9c1051aa 430#ifdef CONFIG_BLK_DEBUG_FS
d386732b
AA
431 /**
432 * @debugfs_dir: debugfs directory for this hardware queue. Named
433 * as cpu<cpu_number>.
434 */
9c1051aa 435 struct dentry *debugfs_dir;
d386732b 436 /** @sched_debugfs_dir: debugfs directory for the scheduler. */
d332ce09 437 struct dentry *sched_debugfs_dir;
9c1051aa 438#endif
07319678 439
2dd209f0
BVA
440 /**
441 * @hctx_list: if this hctx is not in use, this is an entry in
442 * q->unused_hctx_list.
443 */
2f8f1336 444 struct list_head hctx_list;
320ae51f
JA
445};
446
7a18312c 447/**
d386732b 448 * struct blk_mq_queue_map - Map software queues to hardware queues
7a18312c
BVA
449 * @mq_map: CPU ID to hardware queue index map. This is an array
450 * with nr_cpu_ids elements. Each element has a value in the range
451 * [@queue_offset, @queue_offset + @nr_queues).
452 * @nr_queues: Number of hardware queues to map CPU IDs onto.
453 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
454 * driver to map each hardware queue type (enum hctx_type) onto a distinct
455 * set of hardware queues.
456 */
ed76e329
JA
457struct blk_mq_queue_map {
458 unsigned int *mq_map;
459 unsigned int nr_queues;
843477d4 460 unsigned int queue_offset;
ed76e329
JA
461};
462
d386732b
AA
463/**
464 * enum hctx_type - Type of hardware queue
465 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
466 * @HCTX_TYPE_READ: Just for READ I/O.
467 * @HCTX_TYPE_POLL: Polled I/O of any kind.
468 * @HCTX_MAX_TYPES: Number of types of hctx.
469 */
e20ba6e1 470enum hctx_type {
d386732b
AA
471 HCTX_TYPE_DEFAULT,
472 HCTX_TYPE_READ,
473 HCTX_TYPE_POLL,
e20ba6e1
CH
474
475 HCTX_MAX_TYPES,
ed76e329
JA
476};
477
7a18312c
BVA
478/**
479 * struct blk_mq_tag_set - tag set that can be shared between request queues
d88cbbb3 480 * @ops: Pointers to functions that implement block driver behavior.
7a18312c
BVA
481 * @map: One or more ctx -> hctx mappings. One map exists for each
482 * hardware queue type (enum hctx_type) that the driver wishes
483 * to support. There are no restrictions on maps being of the
484 * same size, and it's perfectly legal to share maps between
485 * types.
486 * @nr_maps: Number of elements in the @map array. A number in the range
487 * [1, HCTX_MAX_TYPES].
7a18312c
BVA
488 * @nr_hw_queues: Number of hardware queues supported by the block driver that
489 * owns this data structure.
490 * @queue_depth: Number of tags per hardware queue, reserved tags included.
491 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
492 * allocations.
493 * @cmd_size: Number of additional bytes to allocate per request. The block
494 * driver owns these additional bytes.
495 * @numa_node: NUMA node the storage adapter has been connected to.
496 * @timeout: Request processing timeout in jiffies.
497 * @flags: Zero or more BLK_MQ_F_* flags.
498 * @driver_data: Pointer to data owned by the block driver that created this
499 * tag set.
500 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
501 * elements.
079a2e3e
JG
502 * @shared_tags:
503 * Shared set of tags. Has @nr_hw_queues elements. If set,
504 * shared by all @tags.
7a18312c
BVA
505 * @tag_list_lock: Serializes tag_list accesses.
506 * @tag_list: List of the request queues that use this tag set. See also
507 * request_queue.tag_set_list.
80bd4a7a
CH
508 * @srcu: Use as lock when type of the request queue is blocking
509 * (BLK_MQ_F_BLOCKING).
f31acff0
ML
510 * @update_nr_hwq_lock:
511 * Synchronize updating nr_hw_queues with add/del disk &
512 * switching elevator.
7a18312c 513 */
24d2f903 514struct blk_mq_tag_set {
d88cbbb3 515 const struct blk_mq_ops *ops;
ed76e329 516 struct blk_mq_queue_map map[HCTX_MAX_TYPES];
7a18312c 517 unsigned int nr_maps;
7a18312c
BVA
518 unsigned int nr_hw_queues;
519 unsigned int queue_depth;
320ae51f 520 unsigned int reserved_tags;
7a18312c 521 unsigned int cmd_size;
320ae51f
JA
522 int numa_node;
523 unsigned int timeout;
7a18312c 524 unsigned int flags;
24d2f903
CH
525 void *driver_data;
526
527 struct blk_mq_tags **tags;
0d2602ca 528
079a2e3e 529 struct blk_mq_tags *shared_tags;
e155b0c2 530
0d2602ca
JA
531 struct mutex tag_list_lock;
532 struct list_head tag_list;
80bd4a7a 533 struct srcu_struct *srcu;
98e68f67
ML
534
535 struct rw_semaphore update_nr_hwq_lock;
320ae51f
JA
536};
537
d386732b
AA
538/**
539 * struct blk_mq_queue_data - Data about a request inserted in a queue
540 *
541 * @rq: Request pointer.
542 * @last: If it is the last request in the queue.
543 */
74c45052
JA
544struct blk_mq_queue_data {
545 struct request *rq;
74c45052
JA
546 bool last;
547};
548
2dd6532e 549typedef bool (busy_tag_iter_fn)(struct request *, void *);
05229bee 550
d386732b
AA
551/**
552 * struct blk_mq_ops - Callback functions that implements block driver
553 * behaviour.
554 */
320ae51f 555struct blk_mq_ops {
d386732b
AA
556 /**
557 * @queue_rq: Queue a new request from block IO.
320ae51f 558 */
0516c2f6
DW
559 blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
560 const struct blk_mq_queue_data *);
320ae51f 561
d386732b
AA
562 /**
563 * @commit_rqs: If a driver uses bd->last to judge when to submit
564 * requests to hardware, it must define this function. In case of errors
565 * that make us stop issuing further requests, this hook serves the
d666ba98
JA
566 * purpose of kicking the hardware (which the last request otherwise
567 * would have done).
568 */
0516c2f6 569 void (*commit_rqs)(struct blk_mq_hw_ctx *);
d666ba98 570
3c67d44d
JA
571 /**
572 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
573 * that each request belongs to the same queue. If the driver doesn't
574 * empty the @rqlist completely, then the rest will be queued
575 * individually by the block layer upon return.
576 */
a3396b99 577 void (*queue_rqs)(struct rq_list *rqlist);
3c67d44d 578
d386732b
AA
579 /**
580 * @get_budget: Reserve budget before queue request, once .queue_rq is
de148297
ML
581 * run, it is driver's responsibility to release the
582 * reserved budget. Also we have to handle failure case
583 * of .get_budget for avoiding I/O deadlock.
584 */
2a5a24aa 585 int (*get_budget)(struct request_queue *);
0516c2f6 586
d386732b
AA
587 /**
588 * @put_budget: Release the reserved budget.
589 */
2a5a24aa 590 void (*put_budget)(struct request_queue *, int);
de148297 591
85367040
ML
592 /**
593 * @set_rq_budget_token: store rq's budget token
d022d18c
ML
594 */
595 void (*set_rq_budget_token)(struct request *, int);
85367040
ML
596 /**
597 * @get_rq_budget_token: retrieve rq's budget token
d022d18c
ML
598 */
599 int (*get_rq_budget_token)(struct request *);
600
d386732b
AA
601 /**
602 * @timeout: Called on request timeout.
320ae51f 603 */
9bdb4833 604 enum blk_eh_timer_return (*timeout)(struct request *);
320ae51f 605
d386732b
AA
606 /**
607 * @poll: Called to poll for completion of a specific tag.
05229bee 608 */
5a72e899 609 int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
05229bee 610
d386732b
AA
611 /**
612 * @complete: Mark the request as complete.
613 */
0516c2f6 614 void (*complete)(struct request *);
30a91cb4 615
d386732b
AA
616 /**
617 * @init_hctx: Called when the block layer side of a hardware queue has
618 * been set up, allowing the driver to allocate/init matching
619 * structures.
320ae51f 620 */
0516c2f6 621 int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
d386732b
AA
622 /**
623 * @exit_hctx: Ditto for exit/teardown.
624 */
0516c2f6 625 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
e9b267d9 626
d386732b
AA
627 /**
628 * @init_request: Called for every command allocated by the block layer
629 * to allow the driver to set up driver specific data.
f70ced09
ML
630 *
631 * Tag greater than or equal to queue_depth is for setting up
632 * flush request.
e9b267d9 633 */
0516c2f6
DW
634 int (*init_request)(struct blk_mq_tag_set *set, struct request *,
635 unsigned int, unsigned int);
d386732b
AA
636 /**
637 * @exit_request: Ditto for exit/teardown.
638 */
0516c2f6
DW
639 void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
640 unsigned int);
d386732b 641
d386732b
AA
642 /**
643 * @cleanup_rq: Called before freeing one request which isn't completed
644 * yet, and usually for freeing the driver private data.
226b4fc7 645 */
0516c2f6 646 void (*cleanup_rq)(struct request *);
226b4fc7 647
d386732b
AA
648 /**
649 * @busy: If set, returns whether or not this queue currently is busy.
9ba20527 650 */
0516c2f6 651 bool (*busy)(struct request_queue *);
9ba20527 652
d386732b
AA
653 /**
654 * @map_queues: This allows drivers specify their own queue mapping by
655 * overriding the setup-time function that builds the mq_map.
656 */
a4e1d0b7 657 void (*map_queues)(struct blk_mq_tag_set *set);
2836ee4b
BVA
658
659#ifdef CONFIG_BLK_DEBUG_FS
d386732b
AA
660 /**
661 * @show_rq: Used by the debugfs implementation to show driver-specific
2836ee4b
BVA
662 * information about a request.
663 */
664 void (*show_rq)(struct seq_file *m, struct request *rq);
665#endif
320ae51f
JA
666};
667
226f0f6a 668/* Keep hctx_flag_name[] in sync with the definitions below */
320ae51f 669enum {
51db1c37 670 BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
bf0beec0
ML
671 /*
672 * Set when this device requires underlying blk-mq device for
673 * completing IO:
674 */
675 BLK_MQ_F_STACKING = 1 << 2,
32bc15af 676 BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
226f0f6a 677 BLK_MQ_F_BLOCKING = 1 << 4,
226f0f6a 678
ce32496e
CH
679 /*
680 * Alloc tags on a round-robin base instead of the first available one.
681 */
682 BLK_MQ_F_TAG_RR = 1 << 5,
683
90b71980
BVA
684 /*
685 * Select 'none' during queue registration in case of a single hwq
686 * or shared hwqs instead of 'mq-deadline'.
687 */
226f0f6a 688 BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
ce32496e
CH
689
690 BLK_MQ_F_MAX = 1 << 7,
320ae51f
JA
691};
692
793356d2 693#define BLK_MQ_MAX_DEPTH (10240)
e155b0c2
JG
694#define BLK_MQ_NO_HCTX_IDX (-1U)
695
23827310
JG
696enum {
697 /* Keep hctx_state_name[] in sync with the definitions below */
698 BLK_MQ_S_STOPPED,
699 BLK_MQ_S_TAG_ACTIVE,
700 BLK_MQ_S_SCHED_RESTART,
701 /* hw queue is inactive after all its CPUs become offline */
702 BLK_MQ_S_INACTIVE,
703 BLK_MQ_S_MAX
704};
705
27e32cd2
CH
706struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
707 struct queue_limits *lim, void *queuedata,
4dcc4874 708 struct lock_class_key *lkclass);
27e32cd2 709#define blk_mq_alloc_disk(set, lim, queuedata) \
b461dfc4
CH
710({ \
711 static struct lock_class_key __key; \
b461dfc4 712 \
27e32cd2 713 __blk_mq_alloc_disk(set, lim, queuedata, &__key); \
b461dfc4 714})
6f8191fd
CH
715struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
716 struct lock_class_key *lkclass);
9ac4dd8c
CH
717struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
718 struct queue_limits *lim, void *queuedata);
26a9750a
CH
719int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
720 struct request_queue *q);
6f8191fd 721void blk_mq_destroy_queue(struct request_queue *);
320ae51f 722
24d2f903 723int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
cdb14e0f
CH
724int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
725 const struct blk_mq_ops *ops, unsigned int queue_depth,
726 unsigned int set_flags);
24d2f903
CH
727void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
728
320ae51f 729void blk_mq_free_request(struct request *rq);
f6c80cff
KB
730int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
731 unsigned int poll_flags);
6f3b0e8b 732
3c94d83c 733bool blk_mq_queue_inflight(struct request_queue *q);
ae879912 734
6f3b0e8b 735enum {
9a95e4ef
BVA
736 /* return when out of requests */
737 BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
738 /* allocate from reserved pool */
739 BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
0854bcdc
BVA
740 /* set RQF_PM */
741 BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
6f3b0e8b
CH
742};
743
16458cf3 744struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
9a95e4ef 745 blk_mq_req_flags_t flags);
cd6ce148 746struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
16458cf3 747 blk_opf_t opf, blk_mq_req_flags_t flags,
9a95e4ef 748 unsigned int hctx_idx);
e028f167
JA
749
750/*
751 * Tag address space map.
752 */
753struct blk_mq_tags {
754 unsigned int nr_tags;
755 unsigned int nr_reserved_tags;
4f1731df 756 unsigned int active_queues;
e028f167
JA
757
758 struct sbitmap_queue bitmap_tags;
759 struct sbitmap_queue breserved_tags;
760
761 struct request **rqs;
762 struct request **static_rqs;
763 struct list_head page_list;
764
765 /*
766 * used to clear request reference in rqs[] before freeing one
767 * request pool
768 */
769 spinlock_t lock;
770};
771
772static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
773 unsigned int tag)
774{
775 if (tag < tags->nr_tags) {
776 prefetch(tags->rqs[tag]);
777 return tags->rqs[tag];
778 }
779
780 return NULL;
781}
320ae51f 782
205fb5f5
BVA
783enum {
784 BLK_MQ_UNIQUE_TAG_BITS = 16,
785 BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
786};
787
788u32 blk_mq_unique_tag(struct request *rq);
789
790static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
791{
792 return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
793}
794
795static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
796{
797 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
798}
799
27a46989
PB
800/**
801 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
802 * @rq: target request.
803 */
804static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
805{
806 return READ_ONCE(rq->state);
807}
808
809static inline int blk_mq_request_started(struct request *rq)
810{
811 return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
812}
813
814static inline int blk_mq_request_completed(struct request *rq)
815{
816 return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
817}
320ae51f 818
83fba8c8
CL
819/*
820 *
821 * Set the state to complete when completing a request from inside ->queue_rq.
822 * This is used by drivers that want to ensure special complete actions that
823 * need access to the request are called on failure, e.g. by nvme for
824 * multipathing.
825 */
826static inline void blk_mq_set_request_complete(struct request *rq)
827{
828 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
829}
830
e8dc17e2
SAS
831/*
832 * Complete the request directly instead of deferring it to softirq or
833 * completing it another CPU. Useful in preemptible instead of an interrupt.
834 */
835static inline void blk_mq_complete_request_direct(struct request *rq,
836 void (*complete)(struct request *rq))
837{
838 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
839 complete(rq);
840}
841
e2490073 842void blk_mq_start_request(struct request *rq);
2a842aca
CH
843void blk_mq_end_request(struct request *rq, blk_status_t error);
844void __blk_mq_end_request(struct request *rq, blk_status_t error);
f794f335
JA
845void blk_mq_end_request_batch(struct io_comp_batch *ib);
846
847/*
848 * Only need start/end time stamping if we have iostat or
849 * blk stats enabled, or using an IO scheduler.
850 */
851static inline bool blk_mq_need_time_stamp(struct request *rq)
852{
dd6216bb 853 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
f794f335
JA
854}
855
99e48cd6
JG
856static inline bool blk_mq_is_reserved_rq(struct request *rq)
857{
858 return rq->rq_flags & RQF_RESV;
859}
860
9bce6b5f
SK
861/**
862 * blk_mq_add_to_batch() - add a request to the completion batch
863 * @req: The request to add to batch
864 * @iob: The batch to add the request
865 * @is_error: Specify true if the request failed with an error
866 * @complete: The completaion handler for the request
867 *
f794f335
JA
868 * Batched completions only work when there is no I/O error and no special
869 * ->end_io handler.
9bce6b5f
SK
870 *
871 * Return: true when the request was added to the batch, otherwise false
f794f335
JA
872 */
873static inline bool blk_mq_add_to_batch(struct request *req,
9bce6b5f 874 struct io_comp_batch *iob, bool is_error,
f794f335
JA
875 void (*complete)(struct io_comp_batch *))
876{
c6b7a3a2 877 /*
1f47ed29
JA
878 * Check various conditions that exclude batch processing:
879 * 1) No batch container
880 * 2) Has scheduler data attached
881 * 3) Not a passthrough request and end_io set
9bce6b5f 882 * 4) Not a passthrough request and failed with an error
c6b7a3a2 883 */
1f47ed29 884 if (!iob)
f794f335 885 return false;
1f47ed29
JA
886 if (req->rq_flags & RQF_SCHED_TAGS)
887 return false;
888 if (!blk_rq_is_passthrough(req)) {
889 if (req->end_io)
890 return false;
9bce6b5f 891 if (is_error)
1f47ed29
JA
892 return false;
893 }
ab3e1d3b 894
f794f335
JA
895 if (!iob->complete)
896 iob->complete = complete;
897 else if (iob->complete != complete)
898 return false;
899 iob->need_ts |= blk_mq_need_time_stamp(req);
00e8d290 900 rq_list_add_tail(&iob->req_list, req);
f794f335
JA
901 return true;
902}
320ae51f 903
2b053aca 904void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
6fca6a61 905void blk_mq_kick_requeue_list(struct request_queue *q);
2849450a 906void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
15f73f5b 907void blk_mq_complete_request(struct request *rq);
40d09b53 908bool blk_mq_complete_request_remote(struct request *rq);
320ae51f
JA
909void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
910void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
280d45f6 911void blk_mq_stop_hw_queues(struct request_queue *q);
2f268556 912void blk_mq_start_hw_queues(struct request_queue *q);
ae911c5e 913void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
1b4a3258 914void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
97e01209 915void blk_mq_quiesce_queue(struct request_queue *q);
483239c7 916void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
414dd48e
CL
917void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
918void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
e4e73913 919void blk_mq_unquiesce_queue(struct request_queue *q);
7587a5ae 920void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
626fb735 921void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
b94ec296 922void blk_mq_run_hw_queues(struct request_queue *q, bool async);
b9151e7b 923void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
e0489487
SG
924void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
925 busy_tag_iter_fn *fn, void *priv);
f9934a80 926void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
1e1a9cec
CH
927void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
928void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
929static inline unsigned int __must_check
930blk_mq_freeze_queue(struct request_queue *q)
931{
932 unsigned int memflags = memalloc_noio_save();
933
934 blk_mq_freeze_queue_nomemsave(q);
935 return memflags;
936}
937static inline void
938blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
939{
940 blk_mq_unfreeze_queue_nomemrestore(q);
941 memalloc_noio_restore(memflags);
942}
1671d522 943void blk_freeze_queue_start(struct request_queue *q);
6bae363e 944void blk_mq_freeze_queue_wait(struct request_queue *q);
f91328c4
KB
945int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
946 unsigned long timeout);
8acdd0e7
ML
947void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
948void blk_freeze_queue_start_non_owner(struct request_queue *q);
320ae51f 949
a4e1d0b7 950void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
1452e9b4
DW
951void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
952 struct device *dev, unsigned int offset);
868f2f0b
KB
953void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
954
852ec809 955void blk_mq_quiesce_queue_nowait(struct request_queue *q);
4f084b41 956
9cf2bab6
JA
957unsigned int blk_mq_rq_cpu(struct request *rq);
958
15f73f5b
CH
959bool __blk_should_fake_timeout(struct request_queue *q);
960static inline bool blk_should_fake_timeout(struct request_queue *q)
961{
962 if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
963 test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
964 return __blk_should_fake_timeout(q);
965 return false;
966}
967
d386732b
AA
968/**
969 * blk_mq_rq_from_pdu - cast a PDU to a request
970 * @pdu: the PDU (Protocol Data Unit) to be casted
971 *
972 * Return: request
973 *
320ae51f 974 * Driver command data is immediately after the request. So subtract request
d386732b 975 * size to get back to the original request.
320ae51f
JA
976 */
977static inline struct request *blk_mq_rq_from_pdu(void *pdu)
978{
979 return pdu - sizeof(struct request);
980}
d386732b
AA
981
982/**
983 * blk_mq_rq_to_pdu - cast a request to a PDU
984 * @rq: the request to be casted
985 *
986 * Return: pointer to the PDU
987 *
988 * Driver command data is immediately after the request. So add request to get
989 * the PDU.
990 */
320ae51f
JA
991static inline void *blk_mq_rq_to_pdu(struct request *rq)
992{
2963e3f7 993 return rq + 1;
320ae51f
JA
994}
995
320ae51f 996#define queue_for_each_hw_ctx(q, hctx, i) \
4e5cc99e 997 xa_for_each(&(q)->hctx_table, (i), (hctx))
320ae51f 998
320ae51f 999#define hctx_for_each_ctx(hctx, ctx, i) \
0d0b7d42
JA
1000 for ((i) = 0; (i) < (hctx)->nr_ctx && \
1001 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
320ae51f 1002
226b4fc7
ML
1003static inline void blk_mq_cleanup_rq(struct request *rq)
1004{
1005 if (rq->q->mq_ops->cleanup_rq)
1006 rq->q->mq_ops->cleanup_rq(rq);
1007}
1008
fb01a293
ML
1009void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
1010 struct lock_class_key *key);
8cf7961d 1011
24b83deb
CH
1012static inline bool rq_is_sync(struct request *rq)
1013{
1014 return op_is_sync(rq->cmd_flags);
1015}
1016
1017void blk_rq_init(struct request_queue *q, struct request *rq);
24b83deb
CH
1018int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1019 struct bio_set *bs, gfp_t gfp_mask,
1020 int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
1021void blk_rq_unprep_clone(struct request *rq);
28db4711 1022blk_status_t blk_insert_cloned_request(struct request *rq);
24b83deb
CH
1023
1024struct rq_map_data {
1025 struct page **pages;
24b83deb 1026 unsigned long offset;
f5d632d1
JA
1027 unsigned short page_order;
1028 unsigned short nr_entries;
1029 bool null_mapped;
1030 bool from_user;
24b83deb
CH
1031};
1032
1033int blk_rq_map_user(struct request_queue *, struct request *,
1034 struct rq_map_data *, void __user *, unsigned long, gfp_t);
55765402
AG
1035int blk_rq_map_user_io(struct request *, struct rq_map_data *,
1036 void __user *, unsigned long, gfp_t, bool, int, bool, int);
24b83deb
CH
1037int blk_rq_map_user_iov(struct request_queue *, struct request *,
1038 struct rq_map_data *, const struct iov_iter *, gfp_t);
1039int blk_rq_unmap_user(struct bio *);
af78428e
CH
1040int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
1041 gfp_t gfp);
24b83deb 1042int blk_rq_append_bio(struct request *rq, struct bio *bio);
e2e53086 1043void blk_execute_rq_nowait(struct request *rq, bool at_head);
b84ba30b 1044blk_status_t blk_execute_rq(struct request *rq, bool at_head);
c6e99ea4 1045bool blk_rq_is_poll(struct request *rq);
24b83deb
CH
1046
1047struct req_iterator {
1048 struct bvec_iter iter;
1049 struct bio *bio;
1050};
1051
1052#define __rq_for_each_bio(_bio, rq) \
1053 if ((rq->bio)) \
1054 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
1055
1056#define rq_for_each_segment(bvl, _rq, _iter) \
1057 __rq_for_each_bio(_iter.bio, _rq) \
1058 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
1059
1060#define rq_for_each_bvec(bvl, _rq, _iter) \
1061 __rq_for_each_bio(_iter.bio, _rq) \
1062 bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
1063
1064#define rq_iter_last(bvec, _iter) \
1065 (_iter.bio->bi_next == NULL && \
1066 bio_iter_last(bvec, _iter.iter))
1067
1068/*
1069 * blk_rq_pos() : the current sector
1070 * blk_rq_bytes() : bytes left in the entire request
1071 * blk_rq_cur_bytes() : bytes left in the current segment
24b83deb
CH
1072 * blk_rq_sectors() : sectors left in the entire request
1073 * blk_rq_cur_sectors() : sectors left in the current segment
1074 * blk_rq_stats_sectors() : sectors of the entire request used for stats
1075 */
1076static inline sector_t blk_rq_pos(const struct request *rq)
1077{
1078 return rq->__sector;
1079}
1080
1081static inline unsigned int blk_rq_bytes(const struct request *rq)
1082{
1083 return rq->__data_len;
1084}
1085
1086static inline int blk_rq_cur_bytes(const struct request *rq)
1087{
b6559d8f
CH
1088 if (!rq->bio)
1089 return 0;
1090 if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
1091 return rq->bio->bi_iter.bi_size;
1092 return bio_iovec(rq->bio).bv_len;
24b83deb
CH
1093}
1094
24b83deb
CH
1095static inline unsigned int blk_rq_sectors(const struct request *rq)
1096{
1097 return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1098}
1099
1100static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1101{
1102 return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1103}
1104
1105static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
1106{
1107 return rq->stats_sectors;
1108}
1109
1110/*
1111 * Some commands like WRITE SAME have a payload or data transfer size which
1112 * is different from the size of the request. Any driver that supports such
1113 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1114 * calculate the data transfer size.
1115 */
1116static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1117{
1118 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1119 return rq->special_vec.bv_len;
1120 return blk_rq_bytes(rq);
1121}
1122
1123/*
1124 * Return the first full biovec in the request. The caller needs to check that
1125 * there are any bvecs before calling this helper.
1126 */
1127static inline struct bio_vec req_bvec(struct request *rq)
1128{
1129 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1130 return rq->special_vec;
1131 return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1132}
1133
1134static inline unsigned int blk_rq_count_bios(struct request *rq)
1135{
1136 unsigned int nr_bios = 0;
1137 struct bio *bio;
1138
1139 __rq_for_each_bio(bio, rq)
1140 nr_bios++;
1141
1142 return nr_bios;
1143}
1144
1145void blk_steal_bios(struct bio_list *list, struct request *rq);
1146
1147/*
1148 * Request completion related functions.
1149 *
1150 * blk_update_request() completes given number of bytes and updates
1151 * the request without completing it.
1152 */
1153bool blk_update_request(struct request *rq, blk_status_t error,
1154 unsigned int nr_bytes);
1155void blk_abort_request(struct request *);
1156
1157/*
1158 * Number of physical segments as sent to the device.
1159 *
1160 * Normally this is the number of discontiguous data segments sent by the
1161 * submitter. But for data-less command like discard we might have no
1162 * actual data segments submitted, but the driver might have to add it's
1163 * own special payload. In that case we still return 1 here so that this
1164 * special payload will be mapped.
1165 */
1166static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1167{
1168 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1169 return 1;
1170 return rq->nr_phys_segments;
1171}
1172
1173/*
1174 * Number of discard segments (or ranges) the driver needs to fill in.
1175 * Each discard bio merged into a request is counted as one segment.
1176 */
1177static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1178{
1179 return max_t(unsigned short, rq->nr_phys_segments, 1);
1180}
1181
75618ac6
AG
1182int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
1183 struct scatterlist **last_sg);
1184static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
24b83deb
CH
1185{
1186 struct scatterlist *last_sg = NULL;
1187
75618ac6 1188 return __blk_rq_map_sg(rq, sglist, &last_sg);
24b83deb
CH
1189}
1190void blk_dump_rq_flags(struct request *, char *);
1191
24b83deb 1192#endif /* BLK_MQ_H */