[linux-2.6-block.git] / block / blk-throttle.c
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, a local or
 * child group which can queue many bios at once risks filling up the list
 * and starving the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
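
/*
 * An illustrative sketch (not part of the original source): with cgroups
 * parent/ and parent/child/, the qnode and service_queue relationships
 * described above look roughly like this, assuming bios are queued in
 * both groups:
 *
 *	td->service_queue
 *	    queued[rw]:  parent's qnode_on_parent[rw]
 *	parent tg->service_queue
 *	    queued[rw]:  parent's qnode_on_self[rw], child's qnode_on_parent[rw]
 *	child tg->service_queue
 *	    queued[rw]:  child's qnode_on_self[rw]
 *
 * Popping from the parent's service_queue alternates between the parent's
 * own bios and the ones promoted from the child, which is the round-robin
 * fairness the comment above describes.
 */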

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios. It is
	 * used as a key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in the current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
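
/*
 * Usage sketch (not part of the original source): assuming blktrace is
 * enabled on the queue and @sq belongs to a group whose cgroup path is
 * "/grp", a call such as
 *
 *	throtl_log(sq, "bios disp=%u", nr_disp);
 *
 * shows up in the trace as a message like "throtl /grp bios disp=4",
 * while the same call on the top-level service_queue is prefixed with
 * just "throtl".
 */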

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
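
/*
 * Worked example (not part of the original source): suppose @queued holds
 * qnode A with bios a1, a2, a3 followed by qnode B with bio b1.  Successive
 * throtl_pop_queued() calls return a1 (A rotates to the tail), b1 (B is
 * emptied and removed), then a2 and a3.  No single qnode can monopolize
 * the dispatch order even if it has many bios queued.
 */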

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(pd_to_tg(pd));
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if it is already used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
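
/*
 * Worked example (not part of the original source): with HZ=1000,
 * throtl_slice is 100 jiffies.  Assume bps[rw] is 1048576 (1 MiB/s) and
 * 250 jiffies have elapsed since slice_start.  Then nr_slices = 250/100 =
 * 2 and bytes_trim = 1048576 * 100 * 2 / 1000 = 209715, so roughly 200ms
 * worth of budget is deducted from bytes_disp and slice_start advances by
 * 200 jiffies, leaving only the in-progress 100ms slice accounted.
 */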

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops
	 * can be 1.  At most, jiffy_elapsed should be equivalent to 1
	 * second as we will allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
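
/*
 * Worked example (not part of the original source): with HZ=1000 and
 * iops[rw]=100, a freshly started slice rounds jiffy_elapsed_rnd up to
 * 100 jiffies, so io_allowed = 100 * 100 / 1000 = 10.  The 11th bio in
 * that window is over the limit and gets jiffy_wait = (11 * 1000)/100 + 1
 * = 111 jiffies, i.e. roughly the 110ms needed to cover 11 IOs at 100
 * iops.
 */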

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
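
/*
 * Worked example (not part of the original source): with HZ=1000 and
 * bps[rw]=1048576 (1 MiB/s), a fresh slice allows bytes_allowed =
 * 1048576 * 100 / 1000 = 104857 bytes.  A 1 MiB bio then has extra_bytes
 * = 1048576 - 104857 = 943719, giving jiffy_wait = 943719 * 1000 /
 * 1048576 ~= 900, plus the 100 jiffies of round-up, so the bio waits
 * about one second, which is exactly what a 1 MiB bio needs at 1 MiB/s.
 */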

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 */
	if (!(bio->bi_rw & REQ_THROTTLED))
		bio->bi_rw |= REQ_THROTTLED;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = throtl_peek_queued(&sq->queued[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg->bps[READ], tg->bps[WRITE],
		   tg->iops[READ], tg->iops[WRITE]);

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = -1;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
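
/*
 * Usage sketch (not part of the original source): on the legacy (v1)
 * hierarchy these files appear under the blkio controller, and each write
 * is "MAJ:MIN value" where value 0 clears the limit, e.g.:
 *
 *	# echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads on device 8:16 at 1 MiB/s.  blkg_conf_prep() in tg_set_conf()
 * parses the device number; the sscanf() above parses the value.
 */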

static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
			 int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };

	if (!dname)
		return 0;
	if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
	    tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
		return 0;

	if (tg->bps[READ] != -1)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
	if (tg->bps[WRITE] != -1)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
	if (tg->iops[READ] != -1)
		snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
	if (tg->iops[WRITE] != -1)
		snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
	return 0;
}

static int tg_print_max(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_max(struct kernfs_open_file *of,
			  char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps[READ];
	v[1] = tg->bps[WRITE];
	v[2] = tg->iops[READ];
	v[3] = tg->iops[WRITE];

	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = -1;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else
			goto out_finish;
	}

	tg->bps[READ] = v[0];
	tg->bps[WRITE] = v[1];
	tg->iops[READ] = v[2];
	tg->iops[WRITE] = v[3];

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_max,
		.write = tg_set_max,
	},
	{ }	/* terminate */
};
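
/*
 * Usage sketch (not part of the original source): on the default (v2)
 * hierarchy the single "max" file is exposed as io.max, and tg_set_max()
 * above accepts any subset of the four tokens, e.g.:
 *
 *	# echo "8:16 rbps=2097152 wiops=120" > io.max
 *	# echo "8:16 rbps=max" > io.max
 *
 * Unmentioned tokens keep their current values and "max" removes a limit,
 * matching the v[4] snapshot-then-override logic above.
 */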

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_free_fn		= throtl_pd_free,
};

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL))
			break;

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
		   tg->io_disp[rw], tg->iops[rw],
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio->bi_rw &= ~REQ_THROTTLED;
	return throttled;
}

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);