/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
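
/*
 * Illustrative pairing only (sketch of a hypothetical caller, with
 * tags/set/hctx_idx/depth/reserved standing in for real values): a tag
 * map is created with blk_mq_alloc_rq_map(), populated with
 * blk_mq_alloc_rqs(), and torn down in the reverse order:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, reserved);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, depth)) {
 *		blk_mq_free_rq_map(tags);
 *		tags = NULL;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */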

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
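
/*
 * Of the helpers above, __blk_mq_insert_request() queues one request on
 * a software queue, blk_mq_insert_requests() splices a whole list onto
 * one software queue, and blk_mq_request_bypass_insert() puts a request
 * straight on the hctx dispatch list, bypassing the I/O scheduler and
 * the software queues.
 */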

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

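/*
 * The mapping above is two-level: q->mq_map[] maps a CPU number to a
 * hardware queue index, and q->queue_hw_ctx[] maps that index to the
 * hctx itself, so blk_mq_map_queue(q, cpu) yields the hardware queue
 * that requests submitted on that CPU are dispatched to.
 */
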
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

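/*
 * Illustrative sketch only (hypothetical caller): since the state is
 * read with READ_ONCE(), it can be sampled locklessly, e.g. when
 * deciding whether a request has been started but not yet completed:
 *
 *	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 *		return true;	// request is currently owned by the driver
 */
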
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could, for instance, be
 * per-node as well, but for now this is hardcoded as-is. Note that we
 * don't care about preemption, since we know the ctx's are persistent.
 * This does mean that we can't rely on the ctx always matching the
 * currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

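/*
 * Illustrative pairing only (hypothetical caller): blk_mq_get_ctx()
 * disables preemption via get_cpu(), so every call must be balanced by
 * blk_mq_put_ctx() once the ctx is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	...
 *	blk_mq_put_ctx(ctx);
 */
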
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

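/*
 * A request allocated with BLK_MQ_REQ_INTERNAL set carries a scheduler
 * tag from hctx->sched_tags rather than a driver tag from hctx->tags;
 * the driver tag is only assigned later, at dispatch time.
 */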
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

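/*
 * Illustrative sketch only (simplified from a hypothetical dispatch
 * loop): the budget is taken before the driver tag, and released again
 * if the tag cannot be obtained:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */
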
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

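/*
 * In both wrappers above, rq->tag == -1 means no driver tag is held,
 * and rq->internal_tag == -1 means the request was allocated straight
 * from the driver tag space (no I/O scheduler), in which case the
 * driver tag is not released early.
 */
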
#endif