block/blk-rq-qos.c
// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
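
/*
 * Illustrative example (numbers made up): with v == 3 and below == 4, the
 * cmpxchg succeeds and v becomes 4; the next caller then reads cur == 4,
 * hits the 'cur >= below' check and gets false. A caller that loses a race
 * sees 'old != cur', reloads the current value and retries the cmpxchg.
 */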

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

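/*
 * Each of the __rq_qos_*() helpers below walks the singly linked list of
 * rq_qos policies attached to the queue (e.g. wbt, blk-iolatency) and calls
 * the matching callback on every policy that implements it.
 */
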
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->merge)
			rqos->ops->merge(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
{
	do {
		if (rqos->ops->queue_depth_changed)
			rqos->ops->queue_depth_changed(rqos);
		rqos = rqos->next;
	} while (rqos);
}

/*
 * Return true, if we can't increase the depth further by scaling
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
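
/*
 * Worked example with made-up numbers: for default_depth == 64 and
 * queue_depth == 64, scale_step == 2 gives depth = 1 + ((64 - 1) >> 2) = 16,
 * while scale_step == -1 gives depth = 1 + ((64 - 1) << 1) = 127, which is
 * clamped to maxd = 3 * 64 / 4 = 48 and makes the function return true.
 */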

void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}

struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};

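/*
 * Custom wake function for rqw->wait: a waiter is only dequeued and woken
 * if the waker can first acquire an inflight budget on the waiter's behalf,
 * so a wakeup is never spent on a task that would immediately sleep again.
 */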
static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
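	/*
	 * Order the got_token store before the list removal and wakeup;
	 * pairs with the smp_rmb() before the got_token re-check in
	 * rq_qos_wait().
	 */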
	smp_wmb();
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * inc the rqw->inflight counter if we have the ability to do so, or return
 * false if not, in which case we will sleep until room becomes available.
 *
 * cleanup_cb is for the case where we race with a waker and need to clean up
 * the inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func	= rq_qos_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	has_sleeper = !wq_has_single_sleeper(&rqw->wait);
	do {
		/* The memory barrier in set_current_state saves us here. */
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with rq_qos_wake_function() getting a
			 * token, which means we now have two. Put our local
			 * token and wake anyone else potentially waiting for
			 * one.
			 */
			smp_rmb();
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = true;
		set_current_state(TASK_UNINTERRUPTIBLE);
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
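
/*
 * A hypothetical caller sketch (my_acquire_cb, my_cleanup_cb and my_limit()
 * are made-up names; see blk-wbt.c or blk-iolatency.c for real users):
 *
 *	static bool my_acquire_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit(private_data));
 *	}
 *
 *	static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *	}
 *
 *	rq_qos_wait(rqw, private_data, my_acquire_cb, my_cleanup_cb);
 */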

void rq_qos_exit(struct request_queue *q)
{
	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}