/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>

struct blk_flush_queue;
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests in this list will
		 * be sent first, for fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;
	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before switching to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If a bit is set, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap ctx_map;
	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using an Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue on which requests wait when no tag is
	 * available at the moment, so that they can be retried later.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of the next available dispatch_wait queue to
	 * insert the request into.
	 */
	atomic_t wait_index;
	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Otherwise, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long queued;
	/** @run: Number of dispatched requests. */
	unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
	/**
	 * @dispatched: Counters of dispatched requests per queue run,
	 * bucketed by power of two.
	 */
	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: List node used while a CPU is going offline. */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List node used after a CPU has died. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;
	/** @poll_considered: Number of times blk_poll() was called. */
	unsigned long poll_considered;
	/** @poll_invoked: Number of requests blk_poll() polled for. */
	unsigned long poll_invoked;
	/** @poll_success: Number of polled requests that were completed. */
	unsigned long poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * hctx<queue_num>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: If this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;
	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
	 * blk_mq_hw_ctx_size().
	 */
	struct srcu_struct srcu[];
};
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
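/*
 * Illustrative sketch (not part of this header): a driver that wants separate
 * default and polled hardware queues could describe them with two maps whose
 * @queue_offset values partition its hardware queues, then let the generic
 * helper spread CPUs over each map. The nr_core_queues/nr_poll_queues names
 * below are hypothetical.
 *
 *	set->map[HCTX_TYPE_DEFAULT].nr_queues = nr_core_queues;
 *	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
 *	set->map[HCTX_TYPE_POLL].nr_queues = nr_poll_queues;
 *	set->map[HCTX_TYPE_POLL].queue_offset = nr_core_queues;
 *	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
 */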
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @active_queues_shared_sbitmap:
 *		   Number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's.
 * @__breserved_tags:
 *		   A shared reserved tags sbitmap, used over all hctx's.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map map[HCTX_MAX_TYPES];
	unsigned int nr_maps;
	const struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
	unsigned int reserved_tags;
	unsigned int cmd_size;
	int numa_node;
	unsigned int timeout;
	unsigned int flags;
	void *driver_data;
	atomic_t active_queues_shared_sbitmap;

	struct sbitmap_queue __bitmap_tags;
	struct sbitmap_queue __breserved_tags;
	struct blk_mq_tags **tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
};
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: True if it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);
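	/*
	 * Illustrative sketch only (not part of this header): a minimal
	 * ->queue_rq() implementation might look roughly like this, where
	 * my_queue_rq() and my_submit_to_hardware() are hypothetical driver
	 * functions:
	 *
	 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
	 *					const struct blk_mq_queue_data *bd)
	 *	{
	 *		struct request *rq = bd->rq;
	 *
	 *		blk_mq_start_request(rq);
	 *		if (!my_submit_to_hardware(rq))
	 *			return BLK_STS_RESOURCE;
	 *		return BLK_STS_OK;
	 *	}
	 *
	 * Returning BLK_STS_RESOURCE tells the core to retry the request later.
	 */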
	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * wouldn't have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);
	/**
	 * @get_budget: Reserve a budget before queueing a request; once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also be
	 * handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);
	/**
	 * @set_rq_budget_token: Store a request's budget token.
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: Retrieve a request's budget token.
	 */
	int (*get_rq_budget_token)(struct request *);
	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);
	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting up
	 * the flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);
	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);
	/**
	 * @cleanup_rq: Called before freeing a request that has not completed
	 * yet, usually to free driver-private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue is currently busy.
	 */
	bool (*busy)(struct request_queue *);
	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);
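	/*
	 * Illustrative sketch only: drivers without special queue-placement
	 * needs typically implement ->map_queues() by calling the generic
	 * helper declared later in this header. my_map_queues() is a
	 * hypothetical name.
	 *
	 *	static int my_map_queues(struct blk_mq_tag_set *set)
	 *	{
	 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	 *	}
	 */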
#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
enum {
	BLK_MQ_F_SHOULD_MERGE = 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING = 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING = 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED = 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED = 0,
	BLK_MQ_S_TAG_ACTIVE = 1,
	BLK_MQ_S_SCHED_RESTART = 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE = 3,

	BLK_MQ_MAX_DEPTH = 10240,

	BLK_MQ_CPU_WORK_BATCH = 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
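/*
 * Worked example (illustrative): with BLK_MQ_F_ALLOC_POLICY_START_BIT == 8 and
 * BLK_MQ_F_ALLOC_POLICY_BITS == 1, a tag allocation policy value of 1 travels
 * in bit 8 of the tag set flags:
 *
 *	flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(1);	sets bit 8
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);	yields 1 again
 */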
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)			\
({								\
	static struct lock_class_key __key;			\
								\
	__blk_mq_alloc_disk(set, queuedata, &__key);		\
})
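/*
 * Illustrative bring-up sketch (not a complete driver): a block driver
 * typically fills in a blk_mq_tag_set, registers it with
 * blk_mq_alloc_tag_set(), and then allocates a gendisk bound to that set.
 * The my_* identifiers below are hypothetical.
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq = my_queue_rq,
 *	};
 *
 *	my_set.ops = &my_mq_ops;
 *	my_set.nr_hw_queues = 1;
 *	my_set.queue_depth = 64;
 *	my_set.numa_node = NUMA_NO_NODE;
 *	my_set.cmd_size = sizeof(struct my_cmd);
 *	my_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&my_set);
 *	if (ret)
 *		return ret;
 *	disk = blk_mq_alloc_disk(&my_set, my_driver_data);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&my_set);
 *		return PTR_ERR(disk);
 *	}
 */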
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
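/*
 * Illustrative sketch only: a caller that must not sleep can pass
 * BLK_MQ_REQ_NOWAIT and handle the error pointer returned when no tag is
 * available, e.g. for a hypothetical driver-private passthrough command:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */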
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};
u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
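/*
 * Illustrative sketch: SCSI-style drivers encode the hardware queue index and
 * the per-queue tag in one 32-bit value and later split it again:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *
 * With BLK_MQ_UNIQUE_TAG_BITS == 16, the hardware queue index lives in the
 * upper 16 bits and the tag in the lower 16 bits.
 */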
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
		unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);
bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}
/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
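/*
 * Illustrative sketch: if a driver sets tag_set.cmd_size to
 * sizeof(struct my_cmd) (a hypothetical per-command structure), each request
 * is followed by that many driver-owned bytes, and the two helpers above
 * convert between the request and the driver PDU:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	(rq2 == rq)
 */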
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}
blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);