/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,		/* writeback throttling (blk-wbt) */
	RQ_QOS_LATENCY,		/* blk-iolatency cgroup controller */
	RQ_QOS_COST,		/* blk-iocost cgroup controller */
	RQ_QOS_IOPRIO,		/* blk-ioprio cgroup controller */
};

/*
 * An inflight counter paired with a waitqueue: submitters count themselves
 * in against a limit and sleep on @wait when the limit is exceeded.
 */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

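/*
 * A policy typically embeds struct rq_qos in its own per-queue state and
 * recovers that state with container_of() in its ops callbacks. A minimal
 * sketch (the "my_qos" names are hypothetical, not part of this API):
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait rqw;
 *		unsigned int limit;
 *	};
 *
 *	static inline struct my_qos *RQOS_TO_MYQOS(struct rq_qos *rqos)
 *	{
 *		return container_of(rqos, struct my_qos, rqos);
 *	}
 */
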
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

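/*
 * Illustrative sketch of an ops table: a policy only sets the callbacks it
 * cares about, since the __rq_qos_*() dispatchers (implemented in
 * blk-rq-qos.c) skip NULL entries. The "my_*" functions are hypothetical:
 *
 *	static void my_throttle(struct rq_qos *rqos, struct bio *bio) { ... }
 *	static void my_exit(struct rq_qos *rqos) { ... }
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.throttle	= my_throttle,
 *		.exit		= my_exit,
 *	};
 */
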
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

/* Return the rq_qos instance with @id attached to @q, or NULL if none is. */
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

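/*
 * The usual pattern around struct rq_wait (a sketch, mirroring how callers
 * such as blk-wbt use it): take an inflight slot with rq_wait_inc_below()
 * before issuing, and on completion drop the count and wake any throttled
 * submitters:
 *
 *	if (!rq_wait_inc_below(rqw, limit))
 *		return false;	// over the limit, caller must wait
 *	...
 *	atomic_dec(&rqw->inflight);
 *	if (wq_has_sleeper(&rqw->wait))
 *		wake_up_all(&rqw->wait);
 */
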
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	blk_mq_debugfs_unregister_rqos(rqos);
}

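/*
 * Typical registration flow, a sketch ("my_qos_init" and the my_qos types
 * are hypothetical): a policy allocates its state, fills in ->ops, ->q and
 * ->id, and attaches with rq_qos_add(). Teardown does the reverse:
 * rq_qos_del() unlinks the instance, after which its memory can be freed.
 *
 *	static int my_qos_init(struct request_queue *q)
 *	{
 *		struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *
 *		if (!mq)
 *			return -ENOMEM;
 *		mq->rqos.ops = &my_qos_ops;
 *		mq->rqos.q = q;
 *		mq->rqos.id = RQ_QOS_WBT;	// one of enum rq_qos_id
 *		rq_wait_init(&mq->rqw);
 *		rq_qos_add(q, &mq->rqos);
 *		return 0;
 *	}
 */
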
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

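/*
 * rq_qos_wait() parks the caller until acquire_inflight_cb succeeds; the
 * cleanup_cb gives back an extra inflight token when the sleeper raced with
 * a wakeup and acquired twice. A minimal callback pair built on
 * rq_wait_inc_below() might look like this (a sketch; the "my_*" names are
 * hypothetical):
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		struct my_qos *mq = private_data;
 *
 *		return rq_wait_inc_below(rqw, mq->limit);
 *	}
 *
 *	static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		if (wq_has_sleeper(&rqw->wait))
 *			wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(&mq->rqw, mq, my_inflight_cb, my_cleanup_cb);
 */
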
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_TRACKED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

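/*
 * Consumers of BIO_TRACKED (blk-iolatency, for example) test the flag in
 * their done_bio hook so that bios which never passed through
 * rq_qos_throttle() are not accounted. A sketch with a hypothetical hook:
 *
 *	static void my_done_bio(struct rq_qos *rqos, struct bio *bio)
 *	{
 *		if (!bio_flagged(bio, BIO_TRACKED))
 *			return;
 *		// update completion accounting for this bio
 *	}
 */
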
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif