block/rq_qos: implement rq_qos_ops->queue_depth_changed()
block/blk-rq-qos.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

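/*
 * Each request_queue keeps its active QoS policies on a singly linked list
 * hanging off q->rq_qos.  Every policy is identified by one of the ids
 * below: RQ_QOS_WBT is writeback throttling (blk-wbt) and RQ_QOS_CGROUP is
 * the blk-iolatency cgroup controller.
 */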
enum rq_qos_id {
        RQ_QOS_WBT,
        RQ_QOS_CGROUP,
};

struct rq_wait {
        wait_queue_head_t wait;
        atomic_t inflight;
};

struct rq_qos {
        struct rq_qos_ops *ops;
        struct request_queue *q;
        enum rq_qos_id id;
        struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
        struct dentry *debugfs_dir;
#endif
};

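/*
 * Per-policy hooks, invoked through the rq_qos_*() wrappers below.  The
 * per-I/O hooks are optional: the out-of-line __rq_qos_*() helpers skip
 * policies that leave a callback NULL.  queue_depth_changed() is the hook
 * added by this commit; it generalizes the queue-depth notification that
 * wbt used to receive directly, so that any policy can react when the
 * device's queue depth is updated.
 */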
struct rq_qos_ops {
        void (*throttle)(struct rq_qos *, struct bio *);
        void (*track)(struct rq_qos *, struct request *, struct bio *);
        void (*merge)(struct rq_qos *, struct request *, struct bio *);
        void (*issue)(struct rq_qos *, struct request *);
        void (*requeue)(struct rq_qos *, struct request *);
        void (*done)(struct rq_qos *, struct request *);
        void (*done_bio)(struct rq_qos *, struct bio *);
        void (*cleanup)(struct rq_qos *, struct bio *);
        void (*queue_depth_changed)(struct rq_qos *);
        void (*exit)(struct rq_qos *);
        const struct blk_mq_debugfs_attr *debugfs_attrs;
};

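/*
 * Queue-depth scaling state shared by policies that throttle by limiting the
 * number of inflight requests (wbt and blk-iolatency).  A positive scale_step
 * shrinks max_depth below the device/default depth after latency problems; a
 * negative one lets it grow again, and scaled_max records that max_depth has
 * already reached its upper bound.
 */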
struct rq_depth {
        unsigned int max_depth;

        int scale_step;
        bool scaled_max;

        unsigned int queue_depth;
        unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
                                       enum rq_qos_id id)
{
        struct rq_qos *rqos;
        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->id == id)
                        break;
        }
        return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
        return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
        return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
        switch (id) {
        case RQ_QOS_WBT:
                return "wbt";
        case RQ_QOS_CGROUP:
                return "cgroup";
        }
        return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
        atomic_set(&rq_wait->inflight, 0);
        init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
        rqos->next = q->rq_qos;
        q->rq_qos = rqos;

        if (rqos->ops->debugfs_attrs)
                blk_mq_debugfs_register_rqos(rqos);
}

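/*
 * Usage sketch (hypothetical policy, not part of this file): an
 * implementation embeds a struct rq_qos in its own state, fills in an
 * rq_qos_ops with the hooks it cares about, and attaches itself with
 * rq_qos_add() from its init path, roughly:
 *
 *      static void foo_queue_depth_changed(struct rq_qos *rqos)
 *      {
 *              struct foo_data *foo = container_of(rqos, struct foo_data, rqos);
 *
 *              foo->rqd.queue_depth = rqos->q->queue_depth;
 *              rq_depth_calc_max_depth(&foo->rqd);
 *      }
 *
 *      static struct rq_qos_ops foo_rqos_ops = {
 *              .throttle               = foo_throttle,
 *              .done                   = foo_done,
 *              .queue_depth_changed    = foo_queue_depth_changed,
 *              .exit                   = foo_exit,
 *      };
 *
 * The init path then sets foo->rqos.id (a value from enum rq_qos_id),
 * foo->rqos.ops = &foo_rqos_ops and foo->rqos.q = q, and calls
 * rq_qos_add(q, &foo->rqos).
 */
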
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
        struct rq_qos *cur, *prev = NULL;
        for (cur = q->rq_qos; cur; cur = cur->next) {
                if (cur == rqos) {
                        if (prev)
                                prev->next = rqos->next;
                        else
                                q->rq_qos = rqos->next;
                        break;
                }
                prev = cur;
        }

        blk_mq_debugfs_unregister_rqos(rqos);
}

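/*
 * rq_qos_wait() sleeps on rqw->wait until acquire_inflight_cb() succeeds in
 * taking an inflight slot for the caller (typically via rq_wait_inc_below()).
 * cleanup_cb() releases a slot again in the case where the sleeper acquired
 * one itself while the wake-up path had already handed it another.
 */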
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                 acquire_inflight_cb_t *acquire_inflight_cb,
                 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

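/*
 * The __rq_qos_*() helpers, implemented in block/blk-rq-qos.c, walk the
 * policy list and call the corresponding hook on every policy that provides
 * it.  The inline wrappers below test q->rq_qos first so that queues without
 * any QoS policy only pay for a NULL check in the fast path.
 */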
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
        if (q->rq_qos)
                __rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
        if (q->rq_qos)
                __rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
        if (q->rq_qos)
                __rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
        if (q->rq_qos)
                __rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
        if (q->rq_qos)
                __rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
        /*
         * BIO_TRACKED lets controllers know that a bio went through the
         * normal rq_qos path.
         */
        bio_set_flag(bio, BIO_TRACKED);
        if (q->rq_qos)
                __rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
                                struct bio *bio)
{
        if (q->rq_qos)
                __rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
                                struct bio *bio)
{
        if (q->rq_qos)
                __rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
        if (q->rq_qos)
                __rq_qos_queue_depth_changed(q->rq_qos);
}

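/*
 * Callers are expected to invoke rq_qos_queue_depth_changed() after updating
 * the device's queue depth so that every registered policy can recompute its
 * limits.  A sketch of such a caller (not part of this header):
 *
 *      void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 *      {
 *              q->queue_depth = depth;
 *              rq_qos_queue_depth_changed(q);
 *      }
 */
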
void rq_qos_exit(struct request_queue *);

#endif