/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_flush_queue;
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;
	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works left in the batch before
	 * changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;
	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using an Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;
	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests on when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;
	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag from this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by the I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;
	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	/** @dispatched: Number of dispatched requests, bucketed by batch size order. */
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;
	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;
	/**
	 * @elevator_queued: Number of queued requests on hctx.
	 */
	atomic_t		elevator_queued;

	/** @cpuhp_online: List to store request if CPU is going to die. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store request if some CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;
	/** @poll_considered: Count times blk_poll() was called. */
	unsigned long		poll_considered;
	/** @poll_invoked: Count how many requests blk_poll() polled. */
	unsigned long		poll_invoked;
	/** @poll_success: Count how many polled requests were completed. */
	unsigned long		poll_success;
#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: If this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
	 * blk_mq_hw_ctx_size().
	 */
	struct srcu_struct	srcu[];
};
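
/*
 * Illustrative sketch (not part of this header): because @srcu is a
 * flexible array member, the allocation size of a hctx depends on whether
 * the queue is BLK_MQ_F_BLOCKING. A simplified version of the sizing done
 * by blk_mq_hw_ctx_size() in block/blk-mq.c looks like:
 *
 *	size_t size = sizeof(struct blk_mq_hw_ctx);
 *
 *	if (tag_set->flags & BLK_MQ_F_BLOCKING)
 *		size += sizeof(struct srcu_struct);
 *
 * which is why @srcu must stay the last member of the structure.
 */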
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 *
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
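
/*
 * Illustrative sketch (hypothetical driver "foo" with separate default and
 * poll queues): a ->map_queues() callback typically partitions the hardware
 * queues among the map types by setting @nr_queues and @queue_offset per
 * map, then builds each @mq_map, e.g. with the generic blk_mq_map_queues()
 * helper declared later in this header:
 *
 *	static int foo_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *dmap = &set->map[HCTX_TYPE_DEFAULT];
 *		struct blk_mq_queue_map *pmap = &set->map[HCTX_TYPE_POLL];
 *
 *		dmap->nr_queues = set->nr_hw_queues - foo_nr_poll_queues;
 *		dmap->queue_offset = 0;
 *		blk_mq_map_queues(dmap);
 *
 *		pmap->nr_queues = foo_nr_poll_queues;
 *		pmap->queue_offset = dmap->nr_queues;
 *		blk_mq_map_queues(pmap);
 *
 *		return 0;
 *	}
 *
 * foo_nr_poll_queues is a made-up per-driver variable; real drivers (e.g.
 * NVMe) follow this pattern.
 */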
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @active_queues_shared_sbitmap:
 *		   Number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
 * @__breserved_tags:
 *		   A shared reserved tags sbitmap, used over all hctx's
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;
	atomic_t		active_queues_shared_sbitmap;

	struct sbitmap_queue	__bitmap_tags;
	struct sbitmap_queue	__breserved_tags;
	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
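
/*
 * Illustrative sketch (hypothetical driver "foo"; error handling trimmed):
 * a driver typically zeroes a tag set, fills in the fields above, allocates
 * the tags, and then creates a request queue from the set:
 *
 *	static struct blk_mq_tag_set foo_tag_set;
 *
 *	memset(&foo_tag_set, 0, sizeof(foo_tag_set));
 *	foo_tag_set.ops		 = &foo_mq_ops;
 *	foo_tag_set.nr_hw_queues = 1;
 *	foo_tag_set.nr_maps	 = 1;
 *	foo_tag_set.queue_depth	 = 64;
 *	foo_tag_set.numa_node	 = NUMA_NO_NODE;
 *	foo_tag_set.cmd_size	 = sizeof(struct foo_cmd);
 *	foo_tag_set.flags	 = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&foo_tag_set))
 *		goto out;
 *	q = blk_mq_init_queue(&foo_tag_set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 *
 * foo_mq_ops and struct foo_cmd are made-up names; blk_mq_init_sq_queue()
 * (declared below) wraps this pattern for simple single-queue drivers.
 */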
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);
	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);
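
	/*
	 * Illustrative sketch (hypothetical driver "foo"): ->queue_rq()
	 * can defer ringing the doorbell until bd->last is set, in which
	 * case ->commit_rqs() must ring it for any requests still queued
	 * when the dispatch loop ends early:
	 *
	 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
	 *				const struct blk_mq_queue_data *bd)
	 *	{
	 *		foo_post_cmd(hctx->driver_data, bd->rq);
	 *		if (bd->last)
	 *			foo_ring_doorbell(hctx->driver_data);
	 *		return BLK_STS_OK;
	 *	}
	 *
	 *	static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
	 *	{
	 *		foo_ring_doorbell(hctx->driver_data);
	 *	}
	 *
	 * foo_post_cmd() and foo_ring_doorbell() are made-up helpers.
	 */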
	/**
	 * @get_budget: Reserve budget before queueing a request. Once
	 * .queue_rq is run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of .get_budget must also
	 * be handled to avoid I/O deadlock.
	 */
	bool (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *);
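
	/*
	 * Illustrative sketch: budgets let a driver bound the number of
	 * in-flight requests below the tag-set depth (SCSI uses this for
	 * per-device queue depth). A minimal counter-based pair, assuming
	 * a made-up struct foo_dev hanging off q->queuedata, could be:
	 *
	 *	static bool foo_get_budget(struct request_queue *q)
	 *	{
	 *		struct foo_dev *dev = q->queuedata;
	 *
	 *		if (atomic_inc_return(&dev->busy) > dev->depth) {
	 *			atomic_dec(&dev->busy);
	 *			return false;
	 *		}
	 *		return true;
	 *	}
	 *
	 *	static void foo_put_budget(struct request_queue *q)
	 *	{
	 *		struct foo_dev *dev = q->queuedata;
	 *
	 *		atomic_dec(&dev->busy);
	 *	}
	 */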
	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);
	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
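
	/*
	 * Illustrative sketch (hypothetical driver "foo"): ->init_hctx() is
	 * where per-hardware-queue driver state is usually hung off
	 * hctx->driver_data; the void * argument is the tag set's
	 * driver_data:
	 *
	 *	static int foo_init_hctx(struct blk_mq_hw_ctx *hctx,
	 *				 void *data, unsigned int hctx_idx)
	 *	{
	 *		struct foo_dev *dev = data;
	 *
	 *		hctx->driver_data = &dev->hw_queues[hctx_idx];
	 *		return 0;
	 *	}
	 *
	 * struct foo_dev and its hw_queues[] array are made-up.
	 */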
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
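
	/*
	 * Illustrative sketch (hypothetical driver "foo"): ->init_request()
	 * commonly initializes the per-request PDU that lives behind the
	 * request (see @cmd_size above and blk_mq_rq_to_pdu() below):
	 *
	 *	static int foo_init_request(struct blk_mq_tag_set *set,
	 *				    struct request *rq,
	 *				    unsigned int hctx_idx,
	 *				    unsigned int numa_node)
	 *	{
	 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
	 *
	 *		cmd->dev = set->driver_data;
	 *		return 0;
	 *	}
	 *
	 * struct foo_cmd is a made-up per-request PDU type.
	 */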
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
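
/*
 * Illustrative sketch (hypothetical driver "foo"): an ops table ties the
 * callbacks above together; only ->queue_rq is mandatory:
 *
 *	static const struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.commit_rqs	= foo_commit_rqs,
 *		.init_hctx	= foo_init_hctx,
 *		.init_request	= foo_init_request,
 *	};
 *
 * All foo_* functions are made-up names for the sketches shown above.
 */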
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
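
/*
 * Illustrative usage sketch: these macros pack a tag allocation policy
 * (BLK_TAG_ALLOC_FIFO or BLK_TAG_ALLOC_RR, from blkdev.h) into the
 * BLK_MQ_F_* flags word of a tag set, e.g.:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *
 * and the tag allocation code later recovers the policy with
 * BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags).
 */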
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q,
						  bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};
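
/*
 * Illustrative sketch: a driver that set aside @reserved_tags can allocate
 * an internal command even while the normal tag space is exhausted, using
 * blk_mq_alloc_request() (declared below), e.g.:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT,
 *				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 * REQ_OP_DRV_OUT marks a driver-private, non-filesystem command.
 */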
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
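
/*
 * Illustrative sketch: SCSI LLDs use the unique tag to address a command
 * across all hardware queues with a single 32-bit value, splitting it back
 * into its two halves on completion:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *
 * The upper 16 bits carry the hardware queue index, the lower 16 bits the
 * per-queue tag.
 */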
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
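
/*
 * Illustrative sketch (hypothetical driver "foo"): the usual request life
 * cycle pairs blk_mq_start_request() in ->queue_rq() with a completion
 * call from the driver's interrupt handler:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct request *rq = foo_fetch_completed(data);
 *
 *		if (!rq)
 *			return IRQ_NONE;
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 * The ->complete() callback (or the driver directly) then finishes the
 * request with blk_mq_end_request(). foo_fetch_completed() is a made-up
 * helper.
 */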
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
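
/*
 * Illustrative sketch: freezing drains all in-flight requests and blocks
 * new ones, which is the standard way to update queue state safely:
 *
 *	blk_mq_freeze_queue(q);
 *	foo_update_queue_limits(q);	/- made-up driver helper -/
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_quiesce_queue() is the lighter-weight variant that only guarantees
 * no ->queue_rq() is running once it returns, without draining requests.
 */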
int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}
/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
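
/*
 * Illustrative sketch: with @cmd_size set in the tag set, each request is
 * allocated as [struct request][PDU], so the two helpers above are exact
 * inverses:
 *
 *	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 *
 * struct foo_cmd is a made-up PDU type of size @cmd_size.
 */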
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

blk_qc_t blk_mq_submit_bio(struct bio *bio);

#endif /* BLK_MQ_H */