// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

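/*
 * Return the configured bps/iops limit for @tg in the @rw direction.
 * On the unified (cgroup v2) hierarchy the root group is never
 * throttled, so it always reports an unlimited value.
 */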
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	return tg->bps[rw];
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	return tg->iops[rw];
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

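/*
 * A throtl_qnode carries the bios that one throtl_grp has queued on a
 * service_queue.  Keeping per-group qnodes on the parent's queued[]
 * list lets dispatch round-robin across child groups instead of
 * draining one group at a time.
 */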
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[READ]);
	INIT_LIST_HEAD(&sq->queued[WRITE]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

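/*
 * Allocate and set up the per-cgroup policy data (one throtl_grp per
 * blkcg/device pair).  Limits start out at "unlimited" and are only
 * lowered by configuration writes.
 */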
static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ] = U64_MAX;
	tg->bps[WRITE] = U64_MAX;
	tg->iops[READ] = UINT_MAX;
	tg->iops[WRITE] = UINT_MAX;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If a 16M
	 * read_bps limit is set on a parent group, the summed bps of the
	 * parent group and its subtree groups can't exceed 16M for the
	 * device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		tg->has_rules_iops[rw] =
			(parent_tg && parent_tg->has_rules_iops[rw]) ||
			tg_iops_limit(tg, rw) != UINT_MAX;
		tg->has_rules_bps[rw] =
			(parent_tg && parent_tg->has_rules_bps[rw]) ||
			tg_bps_limit(tg, rw) != U64_MAX;
	}
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

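/*
 * The helpers below maintain the per-service_queue rbtree of pending
 * child groups, ordered by each group's next dispatch time (disptime).
 * The cached leftmost node is always the group that should be
 * dispatched first.
 */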
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

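/*
 * Add/remove @tg on its parent's pending tree.  THROTL_TG_PENDING
 * tracks membership, so both operations are idempotent.
 */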
static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		struct throtl_service_queue *parent_sq =
			tg->service_queue.parent_sq;

		throtl_rb_erase(&tg->rb_node, parent_sq);
		--parent_sq->nr_pending;
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

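/*
 * Slice management.  Throttling accounts dispatched bytes/ios against a
 * rolling time slice of td->throtl_slice jiffies; starting, extending
 * and trimming the slice keeps the accounting window bounded so an idle
 * group cannot bank unlimited credit.
 */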
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->carryover_bytes[rw] = 0;
	tg->carryover_ios[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
					  bool clear_carryover)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	if (clear_carryover) {
		tg->carryover_bytes[rw] = 0;
		tg->carryover_ios[rw] = 0;
	}

	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					 unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

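/*
 * Budget helpers: how many ios/bytes a limit allows over an elapsed
 * number of jiffies.  For example, assuming HZ=1000, a 100 iops limit
 * over a 100-jiffy window allows 100 * 100 / 1000 = 10 ios.
 */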
static unsigned int calculate_io_allowed(u32 iops_limit,
					 unsigned long jiffy_elapsed)
{
	unsigned int io_allowed;
	u64 tmp;

	/*
	 * jiffy_elapsed should not be a big value: the minimum iops is 1,
	 * so at most the equivalent of one second can elapse before a
	 * dispatch is allowed, and by then the slice should have been
	 * trimmed.
	 */
	tmp = (u64)iops_limit * jiffy_elapsed;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	return io_allowed;
}

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
	/*
	 * Can result be wider than 64 bits?
	 * We check against 62, not 64, due to ilog2 truncation.
	 */
	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
		return U64_MAX;
	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long time_elapsed;
	long long bytes_trim;
	int io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
				 tg->td->throtl_slice);
	if (!time_elapsed)
		return;

	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
					     time_elapsed) +
		     tg->carryover_bytes[rw];
	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
		  tg->carryover_ios[rw];
	if (bytes_trim <= 0 && io_trim <= 0)
		return;

	tg->carryover_bytes[rw] = 0;
	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	tg->carryover_ios[rw] = 0;
	if ((int)tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += time_elapsed;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
		   jiffies);
}

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * If config is updated while bios are still throttled, calculate and
	 * accumulate how many bytes/ios are waited across changes. And
	 * carryover_bytes/ios will be used to calculate new wait time under new
	 * configuration.
	 */
	if (bps_limit != U64_MAX)
		tg->carryover_bytes[rw] +=
			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
			tg->bytes_disp[rw];
	if (iops_limit != UINT_MAX)
		tg->carryover_ios[rw] +=
			calculate_io_allowed(iops_limit, jiffy_elapsed) -
			tg->io_disp[rw];
}

static void tg_update_carryover(struct throtl_grp *tg)
{
	if (tg->service_queue.nr_queued[READ])
		__tg_update_carryover(tg, READ);
	if (tg->service_queue.nr_queued[WRITE])
		__tg_update_carryover(tg, WRITE);

	/* see comments in struct throtl_grp for meaning of these fields. */
	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}

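/*
 * Return how many jiffies @bio must still wait to fit under the iops
 * (respectively bps) limit, or 0 if it may be dispatched now.
 */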
static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
					  u32 iops_limit)
{
	bool rw = bio_data_dir(bio);
	int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	if (iops_limit == UINT_MAX) {
		return 0;
	}

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
		     tg->carryover_ios[rw];
	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
		return 0;

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
	return jiffy_wait;
}

static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
					 u64 bps_limit)
{
	bool rw = bio_data_dir(bio);
	long long bytes_allowed;
	u64 extra_bytes;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* no need to throttle if this bio's bytes have been accounted */
	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
		return 0;
	}

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
			tg->carryover_bytes[rw];
	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
		return 0;

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	return jiffy_wait;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
	    tg->flags & THROTL_TG_CANCELING) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw, true);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
	if (bps_wait + iops_wait == 0) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

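/*
 * Account @bio against @tg's current slice.  Bytes are not charged
 * again once the bio carries BIO_BPS_THROTTLED (it already went through
 * bps throttling at a lower level); the iops counters are always
 * charged.
 */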
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
		tg->bytes_disp[rw] += bio_size;
		tg->last_bytes_disp[rw] += bio_size;
	}

	tg->io_disp[rw]++;
	tg->last_io_disp[rw]++;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

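/*
 * Recompute @tg's next dispatch time from the head bios of both
 * directions and re-position it in the parent's pending tree.
 */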
static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->disptime = disptime;
	tg_service_queue_add(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

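/*
 * When a bio climbs to the parent, make sure the parent has an active
 * slice starting no later than the child's, so the time the bio already
 * spent waiting in the child is credited to the parent.
 */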
static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		bio_set_flag(bio, BIO_BPS_THROTTLED);
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

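/*
 * Dispatch up to THROTL_GRP_QUANTUM bios from @tg to its parent,
 * biased 75%/25% between reads and writes so neither direction can
 * starve the other.
 */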
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, READ);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, WRITE);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
			tg_update_disptime(tg);
		else
			throtl_dequeue_tg(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct throtl_service_queue *parent_sq;
	struct request_queue *q;
	bool dispatched;
	int ret;

	/* throtl_data may be gone, so figure out request queue by blkg */
	if (tg)
		q = tg->pd.blkg->q;
	else
		q = td->queue;

	spin_lock_irq(&q->queue_lock);

	if (!q->root_blkg)
		goto out_unlock;

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			submit_bio_noacct_nocheck(bio);
		blk_finish_plug(&plug);
	}
}

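/*
 * seq_file helpers that print a group's configuration for the legacy
 * (cgroup v1) blkio.throttle.* interface.  Unlimited values are
 * skipped so that only explicitly configured devices show up.
 */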
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

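/*
 * Propagate a configuration change: refresh has_rules[] for the whole
 * subtree, restart the slices so recently dispatched IO isn't judged by
 * the new limits, and kick the dispatch timer if bios are already
 * pending.
 */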
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	rcu_read_lock();
	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
	}
	rcu_read_unlock();

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READs and WRITEs.  It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, READ, false);
	throtl_start_new_slice(tg, WRITE, false);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

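/*
 * Lazily allocate and activate the throttling policy for @disk the
 * first time a limit is configured.  The queue is frozen and quiesced
 * so in-flight submitters observe a consistent q->td.
 */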
static int blk_throtl_init(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	/*
	 * Freeze queue before activating policy, to synchronize with IO path,
	 * which is protected by 'q_usage_counter'.
	 */
	blk_mq_freeze_queue(disk->queue);
	blk_mq_quiesce_queue(disk->queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
	if (ret) {
		q->td = NULL;
		kfree(td);
		goto out;
	}

	if (blk_queue_nonrot(q))
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
	else
		td->throtl_slice = DFL_THROTL_SLICE_HD;
	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);

out:
	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue);

	return ret;
}

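/*
 * Parse a write to one of the legacy blkio.throttle.*_device files.
 * The body is "MAJ:MIN VALUE"; e.g. (device numbers illustrative)
 * writing "8:16 1048576" to blkio.throttle.read_bps_device caps reads
 * on that device at 1MB/s, and a value of 0 clears the limit.
 */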
static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto out_finish;

	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
		ret = blk_throtl_init(ctx.bdev->bd_disk);
		if (ret)
			goto out_finish;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
				  &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat_recursive,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	u64 bps_dft;
	unsigned int iops_dft;

	if (!dname)
		return 0;

	bps_dft = U64_MAX;
	iops_dft = UINT_MAX;

	if (tg->bps_conf[READ] == bps_dft &&
	    tg->bps_conf[WRITE] == bps_dft &&
	    tg->iops_conf[READ] == iops_dft &&
	    tg->iops_conf[WRITE] == iops_dft)
		return 0;

	seq_printf(sf, "%s", dname);
	if (tg->bps_conf[READ] == U64_MAX)
		seq_printf(sf, " rbps=max");
	else
		seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);

	if (tg->bps_conf[WRITE] == U64_MAX)
		seq_printf(sf, " wbps=max");
	else
		seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);

	if (tg->iops_conf[READ] == UINT_MAX)
		seq_printf(sf, " riops=max");
	else
		seq_printf(sf, " riops=%u", tg->iops_conf[READ]);

	if (tg->iops_conf[WRITE] == UINT_MAX)
		seq_printf(sf, " wiops=max");
	else
		seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);

	seq_printf(sf, "\n");
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

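/*
 * Parse a write to the cgroup v2 "io.max" file.  The body is
 * "MAJ:MIN [rbps=N] [wbps=N] [riops=N] [wiops=N]" where "max" clears a
 * limit; e.g. (device numbers illustrative) "8:16 wbps=2097152 riops=max"
 * caps writes at 2MB/s and removes any read iops limit.
 */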
static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	int ret;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto out_finish;

	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
		ret = blk_throtl_init(ctx.bdev->bd_disk);
		if (ret)
			goto out_finish;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	v[0] = tg->bps[READ];
	v[1] = tg->bps[WRITE];
	v[2] = tg->iops[READ];
	v[3] = tg->iops[WRITE];

	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps") && val > 1)
			v[0] = val;
		else if (!strcmp(tok, "wbps") && val > 1)
			v[1] = val;
		else if (!strcmp(tok, "riops") && val > 1)
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops") && val > 1)
			v[3] = min_t(u64, val, UINT_MAX);
		else
			goto out_finish;
	}

	tg->bps[READ] = v[0];
	tg->bps[WRITE] = v[1];
	tg->iops[READ] = v[2];
	tg->iops[WRITE] = v[3];

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_free_fn		= throtl_pd_free,
};

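/*
 * Cancel throttling on @disk: flag every group as CANCELING so
 * tg_may_dispatch() stops limiting, then kick the pending timers so
 * already queued bios drain out instead of waiting for their limits.
 */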
void blk_throtl_cancel_bios(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (!blk_throtl_activated(q))
		return;

	spin_lock_irq(&q->queue_lock);
	/*
	 * queue_lock is held, rcu lock is not needed here technically.
	 * However, rcu lock is still held to emphasize that following
	 * path need RCU protection and to prevent warning from lockdep.
	 */
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		/*
		 * Set the flag to make sure throtl_pending_timer_fn() won't
		 * stop until all throttled bios are dispatched.
		 */
		tg->flags |= THROTL_TG_CANCELING;

		/*
		 * Do not dispatch cgroup without THROTL_TG_PENDING or cgroup
		 * will be inserted to service queue without THROTL_TG_PENDING
		 * set in tg_update_disptime below. Then IO dispatched from
		 * child in tg_dispatch_one_bio will trigger double insertion
		 * and corrupt the tree.
		 */
		if (!(tg->flags & THROTL_TG_PENDING))
			continue;

		/*
		 * Update disptime after setting the above flag to make sure
		 * throtl_select_dispatch() won't exit without dispatching.
		 */
		tg_update_disptime(tg);

		throtl_schedule_pending_timer(sq, jiffies + 1);
	}
	rcu_read_unlock();
	spin_unlock_irq(&q->queue_lock);
}

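/*
 * Entry point from the bio submission path: walk @bio up the cgroup
 * hierarchy, charging each level.  Returns true if the bio was queued
 * for delayed dispatch, false if it passed every limit and may be
 * issued now.
 */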
bool __blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	sq = &tg->service_queue;

	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg) {
			bio_set_flag(bio, BIO_BPS_THROTTLED);
			goto out_unlock;
		}
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(&q->queue_lock);

	rcu_read_unlock();
	return throttled;
}

void blk_throtl_exit(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (!blk_throtl_activated(q))
		return;

	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);