blk-throttle: implement throtl_grp->has_rules[]
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head node;		/* service_queue->queued[] */
	struct bio_list bios;		/* queued bios */
	struct throtl_grp *tg;		/* tg this qnode belongs to */
};

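/*
 * For example (illustrative only): if child A queues many bios while child B
 * queues just one, A's and B's qnode_on_parent nodes take turns at the head
 * of the parent's queued[] list, so throtl_pop_queued() pops a bio from A,
 * then from B, then from A again, instead of draining A completely first.
 */
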
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root pending_tree;	/* RB tree of active tgs */
	struct rb_node *first_pending;	/* first node in the tree */
	unsigned int nr_pending;	/* # queued in the tree */
	unsigned long first_pending_disptime;	/* disptime of the first tg */
	struct timer_list pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

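/*
 * Example usage (as seen later in this file): callers pass the service_queue
 * of the group being reported plus a printf-style format, e.g.
 *
 *	throtl_log(&tg->service_queue, "[%c] new slice start=%lu end=%lu",
 *		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 *		   tg->slice_end[rw]);
 *
 * The message only reaches the trace stream when blktrace is enabled on the
 * request_queue.
 */
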
8a3d2615
TH
259/*
260 * Worker for allocating per cpu stat for tgs. This is scheduled on the
3b07e9ca 261 * system_wq once there are some groups on the alloc_list waiting for
8a3d2615
TH
262 * allocation.
263 */
264static void tg_stats_alloc_fn(struct work_struct *work)
265{
266 static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
267 struct delayed_work *dwork = to_delayed_work(work);
268 bool empty = false;
269
270alloc_stats:
271 if (!stats_cpu) {
272 stats_cpu = alloc_percpu(struct tg_stats_cpu);
273 if (!stats_cpu) {
274 /* allocation failed, try again after some time */
3b07e9ca 275 schedule_delayed_work(dwork, msecs_to_jiffies(10));
8a3d2615
TH
276 return;
277 }
278 }
279
280 spin_lock_irq(&tg_stats_alloc_lock);
281
282 if (!list_empty(&tg_stats_alloc_list)) {
283 struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
284 struct throtl_grp,
285 stats_alloc_node);
286 swap(tg->stats_cpu, stats_cpu);
287 list_del_init(&tg->stats_alloc_node);
288 }
289
290 empty = list_empty(&tg_stats_alloc_list);
291 spin_unlock_irq(&tg_stats_alloc_lock);
292 if (!empty)
293 goto alloc_stats;
294}
295
c5cc2070
TH
296static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
297{
298 INIT_LIST_HEAD(&qn->node);
299 bio_list_init(&qn->bios);
300 qn->tg = tg;
301}
302
303/**
304 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
305 * @bio: bio being added
306 * @qn: qnode to add bio to
307 * @queued: the service_queue->queued[] list @qn belongs to
308 *
309 * Add @bio to @qn and put @qn on @queued if it's not already on.
310 * @qn->tg's reference count is bumped when @qn is activated. See the
311 * comment on top of throtl_qnode definition for details.
312 */
313static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
314 struct list_head *queued)
315{
316 bio_list_add(&qn->bios, bio);
317 if (list_empty(&qn->node)) {
318 list_add_tail(&qn->node, queued);
319 blkg_get(tg_to_blkg(qn->tg));
320 }
321}
322
323/**
324 * throtl_peek_queued - peek the first bio on a qnode list
325 * @queued: the qnode list to peek
326 */
327static struct bio *throtl_peek_queued(struct list_head *queued)
328{
329 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
330 struct bio *bio;
331
332 if (list_empty(queued))
333 return NULL;
334
335 bio = bio_list_peek(&qn->bios);
336 WARN_ON_ONCE(!bio);
337 return bio;
338}
339
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
354static struct bio *throtl_pop_queued(struct list_head *queued,
355 struct throtl_grp **tg_to_put)
356{
357 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
358 struct bio *bio;
359
360 if (list_empty(queued))
361 return NULL;
362
363 bio = bio_list_pop(&qn->bios);
364 WARN_ON_ONCE(!bio);
365
366 if (bio_list_empty(&qn->bios)) {
367 list_del_init(&qn->node);
368 if (tg_to_put)
369 *tg_to_put = qn->tg;
370 else
371 blkg_put(tg_to_blkg(qn->tg));
372 } else {
373 list_move_tail(&qn->node, queued);
374 }
375
376 return bio;
377}
378
49a2f1e3 379/* init a service_queue, assumes the caller zeroed it */
77216b04
TH
380static void throtl_service_queue_init(struct throtl_service_queue *sq,
381 struct throtl_service_queue *parent_sq)
49a2f1e3 382{
c5cc2070
TH
383 INIT_LIST_HEAD(&sq->queued[0]);
384 INIT_LIST_HEAD(&sq->queued[1]);
49a2f1e3 385 sq->pending_tree = RB_ROOT;
77216b04 386 sq->parent_sq = parent_sq;
69df0ab0
TH
387 setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
388 (unsigned long)sq);
389}
390
391static void throtl_service_queue_exit(struct throtl_service_queue *sq)
392{
393 del_timer_sync(&sq->pending_timer);
49a2f1e3
TH
394}
395
3c798398 396static void throtl_pd_init(struct blkcg_gq *blkg)
a29a171e 397{
0381411e 398 struct throtl_grp *tg = blkg_to_tg(blkg);
77216b04 399 struct throtl_data *td = blkg->q->td;
ff26eaad 400 unsigned long flags;
c5cc2070 401 int rw;
cd1604fa 402
77216b04 403 throtl_service_queue_init(&tg->service_queue, &td->service_queue);
c5cc2070
TH
404 for (rw = READ; rw <= WRITE; rw++) {
405 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
406 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
407 }
408
a29a171e 409 RB_CLEAR_NODE(&tg->rb_node);
77216b04 410 tg->td = td;
a29a171e 411
e56da7e2
TH
412 tg->bps[READ] = -1;
413 tg->bps[WRITE] = -1;
414 tg->iops[READ] = -1;
415 tg->iops[WRITE] = -1;
8a3d2615
TH
416
417 /*
418 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
419 * but percpu allocator can't be called from IO path. Queue tg on
420 * tg_stats_alloc_list and allocate from work item.
421 */
ff26eaad 422 spin_lock_irqsave(&tg_stats_alloc_lock, flags);
8a3d2615 423 list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
3b07e9ca 424 schedule_delayed_work(&tg_stats_alloc_work, 0);
ff26eaad 425 spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
8a3d2615
TH
426}
427
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
}

static void throtl_pd_online(struct blkcg_gq *blkg)
{
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(blkg_to_tg(blkg));
}
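
/*
 * For example (illustrative only): if a parent group has
 * throttle.read_bps_device configured (bps[READ] != -1) while a freshly
 * created child has no limits of its own, the child still ends up with
 * has_rules[READ] == true because the parent's has_rules[READ] is inherited
 * by the expression above.  Bios from the child therefore cannot bypass
 * blk-throttle even though the child itself is unlimited.
 */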
451
3c798398 452static void throtl_pd_exit(struct blkcg_gq *blkg)
8a3d2615
TH
453{
454 struct throtl_grp *tg = blkg_to_tg(blkg);
ff26eaad 455 unsigned long flags;
8a3d2615 456
ff26eaad 457 spin_lock_irqsave(&tg_stats_alloc_lock, flags);
8a3d2615 458 list_del_init(&tg->stats_alloc_node);
ff26eaad 459 spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
8a3d2615
TH
460
461 free_percpu(tg->stats_cpu);
69df0ab0
TH
462
463 throtl_service_queue_exit(&tg->service_queue);
8a3d2615
TH
464}
465
3c798398 466static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
8a3d2615
TH
467{
468 struct throtl_grp *tg = blkg_to_tg(blkg);
469 int cpu;
470
471 if (tg->stats_cpu == NULL)
472 return;
473
474 for_each_possible_cpu(cpu) {
475 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
476
477 blkg_rwstat_reset(&sc->service_bytes);
478 blkg_rwstat_reset(&sc->serviced);
479 }
a29a171e
VG
480}
481
3c798398
TH
482static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
483 struct blkcg *blkcg)
e43473b7 484{
be2c6b19 485 /*
3c798398
TH
486 * This is the common case when there are no blkcgs. Avoid lookup
487 * in this case
cd1604fa 488 */
3c798398 489 if (blkcg == &blkcg_root)
03d8e111 490 return td_root_tg(td);
e43473b7 491
e8989fae 492 return blkg_to_tg(blkg_lookup(blkcg, td->queue));
e43473b7
VG
493}
494
cd1604fa 495static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
3c798398 496 struct blkcg *blkcg)
e43473b7 497{
f469a7b4 498 struct request_queue *q = td->queue;
cd1604fa 499 struct throtl_grp *tg = NULL;
bc16a4f9 500
f469a7b4 501 /*
3c798398
TH
502 * This is the common case when there are no blkcgs. Avoid lookup
503 * in this case
f469a7b4 504 */
3c798398 505 if (blkcg == &blkcg_root) {
03d8e111 506 tg = td_root_tg(td);
cd1604fa 507 } else {
3c798398 508 struct blkcg_gq *blkg;
f469a7b4 509
3c96cb32 510 blkg = blkg_lookup_create(blkcg, q);
f469a7b4 511
cd1604fa
TH
512 /* if %NULL and @q is alive, fall back to root_tg */
513 if (!IS_ERR(blkg))
0381411e 514 tg = blkg_to_tg(blkg);
3f3299d5 515 else if (!blk_queue_dying(q))
03d8e111 516 tg = td_root_tg(td);
f469a7b4
VG
517 }
518
e43473b7
VG
519 return tg;
520}
521
0049af73
TH
522static struct throtl_grp *
523throtl_rb_first(struct throtl_service_queue *parent_sq)
e43473b7
VG
524{
525 /* Service tree is empty */
0049af73 526 if (!parent_sq->nr_pending)
e43473b7
VG
527 return NULL;
528
0049af73
TH
529 if (!parent_sq->first_pending)
530 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
e43473b7 531
0049af73
TH
532 if (parent_sq->first_pending)
533 return rb_entry_tg(parent_sq->first_pending);
e43473b7
VG
534
535 return NULL;
536}
537
538static void rb_erase_init(struct rb_node *n, struct rb_root *root)
539{
540 rb_erase(n, root);
541 RB_CLEAR_NODE(n);
542}
543
0049af73
TH
544static void throtl_rb_erase(struct rb_node *n,
545 struct throtl_service_queue *parent_sq)
e43473b7 546{
0049af73
TH
547 if (parent_sq->first_pending == n)
548 parent_sq->first_pending = NULL;
549 rb_erase_init(n, &parent_sq->pending_tree);
550 --parent_sq->nr_pending;
e43473b7
VG
551}
552
0049af73 553static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
e43473b7
VG
554{
555 struct throtl_grp *tg;
556
0049af73 557 tg = throtl_rb_first(parent_sq);
e43473b7
VG
558 if (!tg)
559 return;
560
0049af73 561 parent_sq->first_pending_disptime = tg->disptime;
e43473b7
VG
562}
563
77216b04 564static void tg_service_queue_add(struct throtl_grp *tg)
e43473b7 565{
77216b04 566 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
0049af73 567 struct rb_node **node = &parent_sq->pending_tree.rb_node;
e43473b7
VG
568 struct rb_node *parent = NULL;
569 struct throtl_grp *__tg;
570 unsigned long key = tg->disptime;
571 int left = 1;
572
573 while (*node != NULL) {
574 parent = *node;
575 __tg = rb_entry_tg(parent);
576
577 if (time_before(key, __tg->disptime))
578 node = &parent->rb_left;
579 else {
580 node = &parent->rb_right;
581 left = 0;
582 }
583 }
584
585 if (left)
0049af73 586 parent_sq->first_pending = &tg->rb_node;
e43473b7
VG
587
588 rb_link_node(&tg->rb_node, parent, node);
0049af73 589 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
e43473b7
VG
590}
591
77216b04 592static void __throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 593{
77216b04 594 tg_service_queue_add(tg);
5b2c16aa 595 tg->flags |= THROTL_TG_PENDING;
77216b04 596 tg->service_queue.parent_sq->nr_pending++;
e43473b7
VG
597}
598
77216b04 599static void throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 600{
5b2c16aa 601 if (!(tg->flags & THROTL_TG_PENDING))
77216b04 602 __throtl_enqueue_tg(tg);
e43473b7
VG
603}
604
77216b04 605static void __throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 606{
77216b04 607 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
5b2c16aa 608 tg->flags &= ~THROTL_TG_PENDING;
e43473b7
VG
609}
610
77216b04 611static void throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 612{
5b2c16aa 613 if (tg->flags & THROTL_TG_PENDING)
77216b04 614 __throtl_dequeue_tg(tg);
e43473b7
VG
615}
616
a9131a27 617/* Call with queue lock held */
69df0ab0
TH
618static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
619 unsigned long expires)
a9131a27 620{
69df0ab0
TH
621 mod_timer(&sq->pending_timer, expires);
622 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
623 expires - jiffies, jiffies);
a9131a27
TH
624}
625
7f52f98c
TH
626/**
627 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
628 * @sq: the service_queue to schedule dispatch for
629 * @force: force scheduling
630 *
631 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
632 * dispatch time of the first pending child. Returns %true if either timer
633 * is armed or there's no pending child left. %false if the current
634 * dispatch window is still open and the caller should continue
635 * dispatching.
636 *
637 * If @force is %true, the dispatch timer is always scheduled and this
638 * function is guaranteed to return %true. This is to be used when the
639 * caller can't dispatch itself and needs to invoke pending_timer
640 * unconditionally. Note that forced scheduling is likely to induce short
641 * delay before dispatch starts even if @sq->first_pending_disptime is not
642 * in the future and thus shouldn't be used in hot paths.
643 */
644static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
645 bool force)
e43473b7 646{
6a525600 647 /* any pending children left? */
c9e0332e 648 if (!sq->nr_pending)
7f52f98c 649 return true;
e43473b7 650
c9e0332e 651 update_min_dispatch_time(sq);
e43473b7 652
69df0ab0 653 /* is the next dispatch time in the future? */
7f52f98c 654 if (force || time_after(sq->first_pending_disptime, jiffies)) {
69df0ab0 655 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
7f52f98c 656 return true;
69df0ab0
TH
657 }
658
7f52f98c
TH
659 /* tell the caller to continue dispatching */
660 return false;
e43473b7
VG
661}
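
/*
 * A typical caller pattern (a sketch of how throtl_pending_timer_fn() below
 * uses this): keep calling throtl_select_dispatch() and only stop once
 * throtl_schedule_next_dispatch() returns %true, i.e. once the timer is armed
 * for a future disptime or no children are pending any more.
 */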
662
32ee5bc4
VG
663static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
664 bool rw, unsigned long start)
665{
666 tg->bytes_disp[rw] = 0;
667 tg->io_disp[rw] = 0;
668
669 /*
670 * Previous slice has expired. We must have trimmed it after last
671 * bio dispatch. That means since start of last slice, we never used
672 * that bandwidth. Do try to make use of that bandwidth while giving
673 * credit.
674 */
675 if (time_after_eq(start, tg->slice_start[rw]))
676 tg->slice_start[rw] = start;
677
678 tg->slice_end[rw] = jiffies + throtl_slice;
679 throtl_log(&tg->service_queue,
680 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
681 rw == READ ? 'R' : 'W', tg->slice_start[rw],
682 tg->slice_end[rw], jiffies);
683}
684
0f3457f6 685static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
e43473b7
VG
686{
687 tg->bytes_disp[rw] = 0;
8e89d13f 688 tg->io_disp[rw] = 0;
e43473b7
VG
689 tg->slice_start[rw] = jiffies;
690 tg->slice_end[rw] = jiffies + throtl_slice;
fda6f272
TH
691 throtl_log(&tg->service_queue,
692 "[%c] new slice start=%lu end=%lu jiffies=%lu",
693 rw == READ ? 'R' : 'W', tg->slice_start[rw],
694 tg->slice_end[rw], jiffies);
e43473b7
VG
695}
696
0f3457f6
TH
697static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
698 unsigned long jiffy_end)
d1ae8ffd
VG
699{
700 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
701}
702
0f3457f6
TH
703static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
704 unsigned long jiffy_end)
e43473b7
VG
705{
706 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
fda6f272
TH
707 throtl_log(&tg->service_queue,
708 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
709 rw == READ ? 'R' : 'W', tg->slice_start[rw],
710 tg->slice_end[rw], jiffies);
e43473b7
VG
711}
712
713/* Determine if previously allocated or extended slice is complete or not */
0f3457f6 714static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
e43473b7
VG
715{
716 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
717 return 0;
718
719 return 1;
720}
721
722/* Trim the used slices and adjust slice start accordingly */
0f3457f6 723static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
e43473b7 724{
3aad5d3e
VG
725 unsigned long nr_slices, time_elapsed, io_trim;
726 u64 bytes_trim, tmp;
e43473b7
VG
727
728 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
729
	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if the slice is used.  A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end.  A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
745
0f3457f6 746 throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
d1ae8ffd 747
e43473b7
VG
748 time_elapsed = jiffies - tg->slice_start[rw];
749
750 nr_slices = time_elapsed / throtl_slice;
751
752 if (!nr_slices)
753 return;
3aad5d3e
VG
754 tmp = tg->bps[rw] * throtl_slice * nr_slices;
755 do_div(tmp, HZ);
756 bytes_trim = tmp;
e43473b7 757
8e89d13f 758 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
e43473b7 759
8e89d13f 760 if (!bytes_trim && !io_trim)
e43473b7
VG
761 return;
762
763 if (tg->bytes_disp[rw] >= bytes_trim)
764 tg->bytes_disp[rw] -= bytes_trim;
765 else
766 tg->bytes_disp[rw] = 0;
767
8e89d13f
VG
768 if (tg->io_disp[rw] >= io_trim)
769 tg->io_disp[rw] -= io_trim;
770 else
771 tg->io_disp[rw] = 0;
772
e43473b7
VG
773 tg->slice_start[rw] += nr_slices * throtl_slice;
774
fda6f272
TH
775 throtl_log(&tg->service_queue,
776 "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
777 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
778 tg->slice_start[rw], tg->slice_end[rw], jiffies);
e43473b7
VG
779}
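
/*
 * Worked example (illustrative, assuming HZ == 1000 so throtl_slice == 100
 * jiffies): with bps[rw] == 1048576 (1 MiB/s) and iops[rw] == 100, each
 * elapsed slice trims roughly
 *
 *	bytes_trim ~= 1048576 * 100 / 1000 = 104857 bytes
 *	io_trim     = 100 * 100 / 1000     = 10 ios
 *
 * from the dispatched counters, and slice_start is advanced by
 * nr_slices * throtl_slice so the remaining budget reflects only the
 * current window.
 */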
780
0f3457f6
TH
781static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
782 unsigned long *wait)
e43473b7
VG
783{
784 bool rw = bio_data_dir(bio);
8e89d13f 785 unsigned int io_allowed;
e43473b7 786 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
c49c06e4 787 u64 tmp;
e43473b7 788
8e89d13f 789 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
e43473b7 790
8e89d13f
VG
791 /* Slice has just started. Consider one slice interval */
792 if (!jiffy_elapsed)
793 jiffy_elapsed_rnd = throtl_slice;
794
795 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
796
c49c06e4
VG
797 /*
798 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
799 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
800 * will allow dispatch after 1 second and after that slice should
801 * have been trimmed.
802 */
803
804 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
805 do_div(tmp, HZ);
806
807 if (tmp > UINT_MAX)
808 io_allowed = UINT_MAX;
809 else
810 io_allowed = tmp;
8e89d13f
VG
811
812 if (tg->io_disp[rw] + 1 <= io_allowed) {
e43473b7
VG
813 if (wait)
814 *wait = 0;
815 return 1;
816 }
817
8e89d13f
VG
818 /* Calc approx time to dispatch */
819 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
820
821 if (jiffy_wait > jiffy_elapsed)
822 jiffy_wait = jiffy_wait - jiffy_elapsed;
823 else
824 jiffy_wait = 1;
825
826 if (wait)
827 *wait = jiffy_wait;
828 return 0;
829}
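
/*
 * Rough example (illustrative, assuming HZ == 1000 so throtl_slice == 100
 * jiffies): with iops[rw] == 100, io_allowed at the very start of a slice is
 * 100 * 100 / 1000 = 10, so the 11th bio submitted at slice start is asked
 * to wait (10 + 1) * HZ / 100 + 1 = 111 jiffies before it fits under the
 * IOPS limit.
 */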
830
0f3457f6
TH
831static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
832 unsigned long *wait)
8e89d13f
VG
833{
834 bool rw = bio_data_dir(bio);
3aad5d3e 835 u64 bytes_allowed, extra_bytes, tmp;
8e89d13f 836 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
e43473b7
VG
837
838 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
839
840 /* Slice has just started. Consider one slice interval */
841 if (!jiffy_elapsed)
842 jiffy_elapsed_rnd = throtl_slice;
843
844 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
845
5e901a2b
VG
846 tmp = tg->bps[rw] * jiffy_elapsed_rnd;
847 do_div(tmp, HZ);
3aad5d3e 848 bytes_allowed = tmp;
e43473b7
VG
849
850 if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
851 if (wait)
852 *wait = 0;
853 return 1;
854 }
855
856 /* Calc approx time to dispatch */
857 extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
858 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
859
860 if (!jiffy_wait)
861 jiffy_wait = 1;
862
863 /*
864 * This wait time is without taking into consideration the rounding
865 * up we did. Add that time also.
866 */
867 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
e43473b7
VG
868 if (wait)
869 *wait = jiffy_wait;
8e89d13f
VG
870 return 0;
871}
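
/*
 * Rough example (illustrative, assuming HZ == 1000 so throtl_slice == 100
 * jiffies): with bps[rw] == 1048576 (1 MiB/s), a 4 MiB bio arriving right at
 * slice start sees bytes_allowed ~= 104857, so extra_bytes ~= 4089447 and the
 * returned wait works out to roughly 4000 jiffies (about 4s) once the slice
 * rounding is added back, i.e. the time needed to earn the remaining bytes at
 * 1 MiB/s.  tg_may_dispatch() then extends the slice to cover that wait.
 */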
872
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
0f3457f6
TH
877static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
878 unsigned long *wait)
8e89d13f
VG
879{
880 bool rw = bio_data_dir(bio);
881 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
882
883 /*
884 * Currently whole state machine of group depends on first bio
885 * queued in the group bio list. So one should not be calling
886 * this function with a different bio if there are other bios
887 * queued.
888 */
73f0d49a 889 BUG_ON(tg->service_queue.nr_queued[rw] &&
c5cc2070 890 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
e43473b7 891
8e89d13f
VG
892 /* If tg->bps = -1, then BW is unlimited */
893 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
894 if (wait)
895 *wait = 0;
896 return 1;
897 }
898
899 /*
900 * If previous slice expired, start a new one otherwise renew/extend
901 * existing slice to make sure it is at least throtl_slice interval
902 * long since now.
903 */
0f3457f6
TH
904 if (throtl_slice_used(tg, rw))
905 throtl_start_new_slice(tg, rw);
8e89d13f
VG
906 else {
907 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
0f3457f6 908 throtl_extend_slice(tg, rw, jiffies + throtl_slice);
8e89d13f
VG
909 }
910
0f3457f6
TH
911 if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
912 tg_with_in_iops_limit(tg, bio, &iops_wait)) {
8e89d13f
VG
913 if (wait)
914 *wait = 0;
915 return 1;
916 }
917
918 max_wait = max(bps_wait, iops_wait);
919
920 if (wait)
921 *wait = max_wait;
922
923 if (time_before(tg->slice_end[rw], jiffies + max_wait))
0f3457f6 924 throtl_extend_slice(tg, rw, jiffies + max_wait);
e43473b7
VG
925
926 return 0;
927}
928
3c798398 929static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
629ed0b1
TH
930 int rw)
931{
8a3d2615
TH
932 struct throtl_grp *tg = blkg_to_tg(blkg);
933 struct tg_stats_cpu *stats_cpu;
629ed0b1
TH
934 unsigned long flags;
935
936 /* If per cpu stats are not allocated yet, don't do any accounting. */
8a3d2615 937 if (tg->stats_cpu == NULL)
629ed0b1
TH
938 return;
939
940 /*
941 * Disabling interrupts to provide mutual exclusion between two
942 * writes on same cpu. It probably is not needed for 64bit. Not
943 * optimizing that case yet.
944 */
945 local_irq_save(flags);
946
8a3d2615 947 stats_cpu = this_cpu_ptr(tg->stats_cpu);
629ed0b1 948
629ed0b1
TH
949 blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
950 blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
951
952 local_irq_restore(flags);
953}
954
e43473b7
VG
955static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
956{
957 bool rw = bio_data_dir(bio);
e43473b7
VG
958
959 /* Charge the bio to the group */
960 tg->bytes_disp[rw] += bio->bi_size;
8e89d13f 961 tg->io_disp[rw]++;
e43473b7 962
	/*
	 * REQ_THROTTLED is used to prevent the same bio from being throttled
	 * more than once, as a throttled bio will go through blk-throtl a
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 *
	 * Dispatch stats aren't recursive and each @bio should only be
	 * accounted by the @tg it was originally associated with.  Let's
	 * update the stats when setting REQ_THROTTLED for the first time
	 * which is guaranteed to be for the @bio's original tg.
	 */
974 if (!(bio->bi_rw & REQ_THROTTLED)) {
975 bio->bi_rw |= REQ_THROTTLED;
976 throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
977 bio->bi_rw);
978 }
e43473b7
VG
979}
980
c5cc2070
TH
981/**
982 * throtl_add_bio_tg - add a bio to the specified throtl_grp
983 * @bio: bio to add
984 * @qn: qnode to use
985 * @tg: the target throtl_grp
986 *
987 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
988 * tg->qnode_on_self[] is used.
989 */
990static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
991 struct throtl_grp *tg)
e43473b7 992{
73f0d49a 993 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
994 bool rw = bio_data_dir(bio);
995
c5cc2070
TH
996 if (!qn)
997 qn = &tg->qnode_on_self[rw];
998
	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
1005 if (!sq->nr_queued[rw])
1006 tg->flags |= THROTL_TG_WAS_EMPTY;
1007
c5cc2070
TH
1008 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1009
73f0d49a 1010 sq->nr_queued[rw]++;
77216b04 1011 throtl_enqueue_tg(tg);
e43473b7
VG
1012}
1013
77216b04 1014static void tg_update_disptime(struct throtl_grp *tg)
e43473b7 1015{
73f0d49a 1016 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1017 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1018 struct bio *bio;
1019
c5cc2070 1020 if ((bio = throtl_peek_queued(&sq->queued[READ])))
0f3457f6 1021 tg_may_dispatch(tg, bio, &read_wait);
e43473b7 1022
c5cc2070 1023 if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
0f3457f6 1024 tg_may_dispatch(tg, bio, &write_wait);
e43473b7
VG
1025
1026 min_wait = min(read_wait, write_wait);
1027 disptime = jiffies + min_wait;
1028
e43473b7 1029 /* Update dispatch time */
77216b04 1030 throtl_dequeue_tg(tg);
e43473b7 1031 tg->disptime = disptime;
77216b04 1032 throtl_enqueue_tg(tg);
0e9f4164
TH
1033
1034 /* see throtl_add_bio_tg() */
1035 tg->flags &= ~THROTL_TG_WAS_EMPTY;
e43473b7
VG
1036}
1037
32ee5bc4
VG
1038static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1039 struct throtl_grp *parent_tg, bool rw)
1040{
1041 if (throtl_slice_used(parent_tg, rw)) {
1042 throtl_start_new_slice_with_credit(parent_tg, rw,
1043 child_tg->slice_start[rw]);
1044 }
1045
1046}
1047
77216b04 1048static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
e43473b7 1049{
73f0d49a 1050 struct throtl_service_queue *sq = &tg->service_queue;
6bc9c2b4
TH
1051 struct throtl_service_queue *parent_sq = sq->parent_sq;
1052 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
c5cc2070 1053 struct throtl_grp *tg_to_put = NULL;
e43473b7
VG
1054 struct bio *bio;
1055
c5cc2070
TH
1056 /*
1057 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1058 * from @tg may put its reference and @parent_sq might end up
1059 * getting released prematurely. Remember the tg to put and put it
1060 * after @bio is transferred to @parent_sq.
1061 */
1062 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
73f0d49a 1063 sq->nr_queued[rw]--;
e43473b7
VG
1064
1065 throtl_charge_bio(tg, bio);
6bc9c2b4
TH
1066
1067 /*
1068 * If our parent is another tg, we just need to transfer @bio to
1069 * the parent using throtl_add_bio_tg(). If our parent is
1070 * @td->service_queue, @bio is ready to be issued. Put it on its
1071 * bio_lists[] and decrease total number queued. The caller is
1072 * responsible for issuing these bios.
1073 */
1074 if (parent_tg) {
c5cc2070 1075 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
32ee5bc4 1076 start_parent_slice_with_credit(tg, parent_tg, rw);
6bc9c2b4 1077 } else {
c5cc2070
TH
1078 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1079 &parent_sq->queued[rw]);
6bc9c2b4
TH
1080 BUG_ON(tg->td->nr_queued[rw] <= 0);
1081 tg->td->nr_queued[rw]--;
1082 }
e43473b7 1083
0f3457f6 1084 throtl_trim_slice(tg, rw);
6bc9c2b4 1085
c5cc2070
TH
1086 if (tg_to_put)
1087 blkg_put(tg_to_blkg(tg_to_put));
e43473b7
VG
1088}
1089
77216b04 1090static int throtl_dispatch_tg(struct throtl_grp *tg)
e43473b7 1091{
73f0d49a 1092 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1093 unsigned int nr_reads = 0, nr_writes = 0;
1094 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
c2f6805d 1095 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
e43473b7
VG
1096 struct bio *bio;
1097
1098 /* Try to dispatch 75% READS and 25% WRITES */
1099
c5cc2070 1100 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
0f3457f6 1101 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1102
77216b04 1103 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1104 nr_reads++;
1105
1106 if (nr_reads >= max_nr_reads)
1107 break;
1108 }
1109
c5cc2070 1110 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
0f3457f6 1111 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1112
77216b04 1113 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1114 nr_writes++;
1115
1116 if (nr_writes >= max_nr_writes)
1117 break;
1118 }
1119
1120 return nr_reads + nr_writes;
1121}
1122
651930bc 1123static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
e43473b7
VG
1124{
1125 unsigned int nr_disp = 0;
e43473b7
VG
1126
1127 while (1) {
73f0d49a
TH
1128 struct throtl_grp *tg = throtl_rb_first(parent_sq);
1129 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1130
1131 if (!tg)
1132 break;
1133
1134 if (time_before(jiffies, tg->disptime))
1135 break;
1136
77216b04 1137 throtl_dequeue_tg(tg);
e43473b7 1138
77216b04 1139 nr_disp += throtl_dispatch_tg(tg);
e43473b7 1140
73f0d49a 1141 if (sq->nr_queued[0] || sq->nr_queued[1])
77216b04 1142 tg_update_disptime(tg);
e43473b7
VG
1143
1144 if (nr_disp >= throtl_quantum)
1145 break;
1146 }
1147
1148 return nr_disp;
1149}
1150
6e1a5704
TH
1151/**
1152 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1153 * @arg: the throtl_service_queue being serviced
1154 *
 * This timer is armed when a child throtl_grp with active bios becomes
1156 * pending and queued on the service_queue's pending_tree and expires when
1157 * the first child throtl_grp should be dispatched. This function
2e48a530
TH
1158 * dispatches bio's from the children throtl_grps to the parent
1159 * service_queue.
1160 *
1161 * If the parent's parent is another throtl_grp, dispatching is propagated
1162 * by either arming its pending_timer or repeating dispatch directly. If
1163 * the top-level service_tree is reached, throtl_data->dispatch_work is
1164 * kicked so that the ready bio's are issued.
6e1a5704 1165 */
69df0ab0
TH
1166static void throtl_pending_timer_fn(unsigned long arg)
1167{
1168 struct throtl_service_queue *sq = (void *)arg;
2e48a530 1169 struct throtl_grp *tg = sq_to_tg(sq);
69df0ab0 1170 struct throtl_data *td = sq_to_td(sq);
cb76199c 1171 struct request_queue *q = td->queue;
2e48a530
TH
1172 struct throtl_service_queue *parent_sq;
1173 bool dispatched;
6e1a5704 1174 int ret;
e43473b7
VG
1175
1176 spin_lock_irq(q->queue_lock);
2e48a530
TH
1177again:
1178 parent_sq = sq->parent_sq;
1179 dispatched = false;
e43473b7 1180
7f52f98c
TH
1181 while (true) {
1182 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
2e48a530
TH
1183 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1184 sq->nr_queued[READ], sq->nr_queued[WRITE]);
7f52f98c
TH
1185
1186 ret = throtl_select_dispatch(sq);
1187 if (ret) {
7f52f98c
TH
1188 throtl_log(sq, "bios disp=%u", ret);
1189 dispatched = true;
1190 }
e43473b7 1191
7f52f98c
TH
1192 if (throtl_schedule_next_dispatch(sq, false))
1193 break;
e43473b7 1194
7f52f98c
TH
1195 /* this dispatch windows is still open, relax and repeat */
1196 spin_unlock_irq(q->queue_lock);
1197 cpu_relax();
1198 spin_lock_irq(q->queue_lock);
651930bc 1199 }
e43473b7 1200
2e48a530
TH
1201 if (!dispatched)
1202 goto out_unlock;
6e1a5704 1203
2e48a530
TH
1204 if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
1206 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1207 tg_update_disptime(tg);
1208 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1209 /* window is already open, repeat dispatching */
1210 sq = parent_sq;
1211 tg = sq_to_tg(sq);
1212 goto again;
1213 }
1214 }
1215 } else {
		/* reached the top-level, queue issuing */
1217 queue_work(kthrotld_workqueue, &td->dispatch_work);
1218 }
1219out_unlock:
e43473b7 1220 spin_unlock_irq(q->queue_lock);
6e1a5704 1221}
e43473b7 1222
6e1a5704
TH
1223/**
1224 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1225 * @work: work item being executed
1226 *
1227 * This function is queued for execution when bio's reach the bio_lists[]
1228 * of throtl_data->service_queue. Those bio's are ready and issued by this
1229 * function.
1230 */
1231void blk_throtl_dispatch_work_fn(struct work_struct *work)
1232{
1233 struct throtl_data *td = container_of(work, struct throtl_data,
1234 dispatch_work);
1235 struct throtl_service_queue *td_sq = &td->service_queue;
1236 struct request_queue *q = td->queue;
1237 struct bio_list bio_list_on_stack;
1238 struct bio *bio;
1239 struct blk_plug plug;
1240 int rw;
1241
1242 bio_list_init(&bio_list_on_stack);
1243
1244 spin_lock_irq(q->queue_lock);
c5cc2070
TH
1245 for (rw = READ; rw <= WRITE; rw++)
1246 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1247 bio_list_add(&bio_list_on_stack, bio);
6e1a5704
TH
1248 spin_unlock_irq(q->queue_lock);
1249
1250 if (!bio_list_empty(&bio_list_on_stack)) {
69d60eb9 1251 blk_start_plug(&plug);
e43473b7
VG
1252 while((bio = bio_list_pop(&bio_list_on_stack)))
1253 generic_make_request(bio);
69d60eb9 1254 blk_finish_plug(&plug);
e43473b7 1255 }
e43473b7
VG
1256}
1257
f95a04af
TH
1258static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
1259 struct blkg_policy_data *pd, int off)
41b38b6d 1260{
f95a04af 1261 struct throtl_grp *tg = pd_to_tg(pd);
41b38b6d
TH
1262 struct blkg_rwstat rwstat = { }, tmp;
1263 int i, cpu;
1264
1265 for_each_possible_cpu(cpu) {
8a3d2615 1266 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
41b38b6d
TH
1267
1268 tmp = blkg_rwstat_read((void *)sc + off);
1269 for (i = 0; i < BLKG_RWSTAT_NR; i++)
1270 rwstat.cnt[i] += tmp.cnt[i];
1271 }
1272
f95a04af 1273 return __blkg_prfill_rwstat(sf, pd, &rwstat);
41b38b6d
TH
1274}
1275
8a3d2615
TH
1276static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
1277 struct seq_file *sf)
41b38b6d 1278{
3c798398 1279 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
41b38b6d 1280
3c798398 1281 blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
5bc4afb1 1282 cft->private, true);
41b38b6d
TH
1283 return 0;
1284}
1285
f95a04af
TH
1286static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1287 int off)
60c2bc2d 1288{
f95a04af
TH
1289 struct throtl_grp *tg = pd_to_tg(pd);
1290 u64 v = *(u64 *)((void *)tg + off);
60c2bc2d 1291
af133ceb 1292 if (v == -1)
60c2bc2d 1293 return 0;
f95a04af 1294 return __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1295}
1296
f95a04af
TH
1297static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1298 int off)
e43473b7 1299{
f95a04af
TH
1300 struct throtl_grp *tg = pd_to_tg(pd);
1301 unsigned int v = *(unsigned int *)((void *)tg + off);
fe071437 1302
af133ceb
TH
1303 if (v == -1)
1304 return 0;
f95a04af 1305 return __blkg_prfill_u64(sf, pd, v);
e43473b7
VG
1306}
1307
af133ceb
TH
1308static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1309 struct seq_file *sf)
8e89d13f 1310{
3c798398
TH
1311 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
1312 &blkcg_policy_throtl, cft->private, false);
af133ceb 1313 return 0;
8e89d13f
VG
1314}
1315
af133ceb
TH
1316static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1317 struct seq_file *sf)
8e89d13f 1318{
3c798398
TH
1319 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
1320 &blkcg_policy_throtl, cft->private, false);
af133ceb 1321 return 0;
60c2bc2d
TH
1322}
1323
af133ceb
TH
1324static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
1325 bool is_u64)
60c2bc2d 1326{
3c798398 1327 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
60c2bc2d 1328 struct blkg_conf_ctx ctx;
af133ceb 1329 struct throtl_grp *tg;
69df0ab0 1330 struct throtl_service_queue *sq;
693e751e
TH
1331 struct blkcg_gq *blkg;
1332 struct cgroup *pos_cgrp;
60c2bc2d
TH
1333 int ret;
1334
3c798398 1335 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
60c2bc2d
TH
1336 if (ret)
1337 return ret;
1338
af133ceb 1339 tg = blkg_to_tg(ctx.blkg);
69df0ab0 1340 sq = &tg->service_queue;
af133ceb 1341
a2b1693b
TH
1342 if (!ctx.v)
1343 ctx.v = -1;
af133ceb 1344
a2b1693b
TH
1345 if (is_u64)
1346 *(u64 *)((void *)tg + cft->private) = ctx.v;
1347 else
1348 *(unsigned int *)((void *)tg + cft->private) = ctx.v;
af133ceb 1349
fda6f272
TH
1350 throtl_log(&tg->service_queue,
1351 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1352 tg->bps[READ], tg->bps[WRITE],
1353 tg->iops[READ], tg->iops[WRITE]);
632b4493 1354
693e751e
TH
1355 /*
1356 * Update has_rules[] flags for the updated tg's subtree. A tg is
1357 * considered to have rules if either the tg itself or any of its
1358 * ancestors has rules. This identifies groups without any
1359 * restrictions in the whole hierarchy and allows them to bypass
1360 * blk-throttle.
1361 */
1362 tg_update_has_rules(tg);
1363 blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
1364 tg_update_has_rules(blkg_to_tg(blkg));
1365
632b4493
TH
1366 /*
1367 * We're already holding queue_lock and know @tg is valid. Let's
1368 * apply the new config directly.
1369 *
1370 * Restart the slices for both READ and WRITES. It might happen
1371 * that a group's limit are dropped suddenly and we don't want to
1372 * account recently dispatched IO with new low rate.
1373 */
0f3457f6
TH
1374 throtl_start_new_slice(tg, 0);
1375 throtl_start_new_slice(tg, 1);
632b4493 1376
5b2c16aa 1377 if (tg->flags & THROTL_TG_PENDING) {
77216b04 1378 tg_update_disptime(tg);
7f52f98c 1379 throtl_schedule_next_dispatch(sq->parent_sq, true);
632b4493 1380 }
60c2bc2d
TH
1381
1382 blkg_conf_finish(&ctx);
a2b1693b 1383 return 0;
8e89d13f
VG
1384}
1385
af133ceb
TH
1386static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1387 const char *buf)
60c2bc2d 1388{
af133ceb 1389 return tg_set_conf(cgrp, cft, buf, true);
60c2bc2d
TH
1390}
1391
af133ceb
TH
1392static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1393 const char *buf)
60c2bc2d 1394{
af133ceb 1395 return tg_set_conf(cgrp, cft, buf, false);
60c2bc2d
TH
1396}
1397
1398static struct cftype throtl_files[] = {
1399 {
1400 .name = "throttle.read_bps_device",
af133ceb
TH
1401 .private = offsetof(struct throtl_grp, bps[READ]),
1402 .read_seq_string = tg_print_conf_u64,
1403 .write_string = tg_set_conf_u64,
60c2bc2d
TH
1404 .max_write_len = 256,
1405 },
1406 {
1407 .name = "throttle.write_bps_device",
af133ceb
TH
1408 .private = offsetof(struct throtl_grp, bps[WRITE]),
1409 .read_seq_string = tg_print_conf_u64,
1410 .write_string = tg_set_conf_u64,
60c2bc2d
TH
1411 .max_write_len = 256,
1412 },
1413 {
1414 .name = "throttle.read_iops_device",
af133ceb
TH
1415 .private = offsetof(struct throtl_grp, iops[READ]),
1416 .read_seq_string = tg_print_conf_uint,
1417 .write_string = tg_set_conf_uint,
60c2bc2d
TH
1418 .max_write_len = 256,
1419 },
1420 {
1421 .name = "throttle.write_iops_device",
af133ceb
TH
1422 .private = offsetof(struct throtl_grp, iops[WRITE]),
1423 .read_seq_string = tg_print_conf_uint,
1424 .write_string = tg_set_conf_uint,
60c2bc2d
TH
1425 .max_write_len = 256,
1426 },
1427 {
1428 .name = "throttle.io_service_bytes",
5bc4afb1 1429 .private = offsetof(struct tg_stats_cpu, service_bytes),
8a3d2615 1430 .read_seq_string = tg_print_cpu_rwstat,
60c2bc2d
TH
1431 },
1432 {
1433 .name = "throttle.io_serviced",
5bc4afb1 1434 .private = offsetof(struct tg_stats_cpu, serviced),
8a3d2615 1435 .read_seq_string = tg_print_cpu_rwstat,
60c2bc2d
TH
1436 },
1437 { } /* terminate */
1438};
1439
da527770 1440static void throtl_shutdown_wq(struct request_queue *q)
e43473b7
VG
1441{
1442 struct throtl_data *td = q->td;
1443
69df0ab0 1444 cancel_work_sync(&td->dispatch_work);
e43473b7
VG
1445}
1446
3c798398 1447static struct blkcg_policy blkcg_policy_throtl = {
f9fcc2d3
TH
1448 .pd_size = sizeof(struct throtl_grp),
1449 .cftypes = throtl_files,
1450
1451 .pd_init_fn = throtl_pd_init,
693e751e 1452 .pd_online_fn = throtl_pd_online,
f9fcc2d3
TH
1453 .pd_exit_fn = throtl_pd_exit,
1454 .pd_reset_stats_fn = throtl_pd_reset_stats,
e43473b7
VG
1455};
1456
bc16a4f9 1457bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
e43473b7
VG
1458{
1459 struct throtl_data *td = q->td;
c5cc2070 1460 struct throtl_qnode *qn = NULL;
e43473b7 1461 struct throtl_grp *tg;
73f0d49a 1462 struct throtl_service_queue *sq;
0e9f4164 1463 bool rw = bio_data_dir(bio);
3c798398 1464 struct blkcg *blkcg;
bc16a4f9 1465 bool throttled = false;
e43473b7 1466
2a0f61e6
TH
1467 /* see throtl_charge_bio() */
1468 if (bio->bi_rw & REQ_THROTTLED)
bc16a4f9 1469 goto out;
e43473b7 1470
af75cd3c
VG
1471 /*
1472 * A throtl_grp pointer retrieved under rcu can be used to access
1473 * basic fields like stats and io rates. If a group has no rules,
1474 * just update the dispatch stats in lockless manner and return.
1475 */
af75cd3c 1476 rcu_read_lock();
3c798398 1477 blkcg = bio_blkcg(bio);
cd1604fa 1478 tg = throtl_lookup_tg(td, blkcg);
af75cd3c 1479 if (tg) {
693e751e 1480 if (!tg->has_rules[rw]) {
629ed0b1
TH
1481 throtl_update_dispatch_stats(tg_to_blkg(tg),
1482 bio->bi_size, bio->bi_rw);
2a7f1244 1483 goto out_unlock_rcu;
af75cd3c
VG
1484 }
1485 }
af75cd3c
VG
1486
1487 /*
1488 * Either group has not been allocated yet or it is not an unlimited
1489 * IO group
1490 */
e43473b7 1491 spin_lock_irq(q->queue_lock);
cd1604fa 1492 tg = throtl_lookup_create_tg(td, blkcg);
bc16a4f9
TH
1493 if (unlikely(!tg))
1494 goto out_unlock;
f469a7b4 1495
73f0d49a
TH
1496 sq = &tg->service_queue;
1497
9e660acf
TH
1498 while (true) {
1499 /* throtl is FIFO - if bios are already queued, should queue */
1500 if (sq->nr_queued[rw])
1501 break;
de701c74 1502
9e660acf
TH
1503 /* if above limits, break to queue */
1504 if (!tg_may_dispatch(tg, bio, NULL))
1505 break;
1506
1507 /* within limits, let's charge and dispatch directly */
e43473b7 1508 throtl_charge_bio(tg, bio);
04521db0
VG
1509
		/*
		 * We need to trim the slice even when bios are not being
		 * queued; otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time.  Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
0f3457f6 1521 throtl_trim_slice(tg, rw);
9e660acf
TH
1522
		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
c5cc2070 1528 qn = &tg->qnode_on_parent[rw];
9e660acf
TH
1529 sq = sq->parent_sq;
1530 tg = sq_to_tg(sq);
1531 if (!tg)
1532 goto out_unlock;
e43473b7
VG
1533 }
1534
9e660acf 1535 /* out-of-limit, queue to @tg */
fda6f272
TH
1536 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1537 rw == READ ? 'R' : 'W',
1538 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1539 tg->io_disp[rw], tg->iops[rw],
1540 sq->nr_queued[READ], sq->nr_queued[WRITE]);
e43473b7 1541
671058fb 1542 bio_associate_current(bio);
6bc9c2b4 1543 tg->td->nr_queued[rw]++;
c5cc2070 1544 throtl_add_bio_tg(bio, qn, tg);
bc16a4f9 1545 throttled = true;
e43473b7 1546
7f52f98c
TH
1547 /*
1548 * Update @tg's dispatch time and force schedule dispatch if @tg
1549 * was empty before @bio. The forced scheduling isn't likely to
1550 * cause undue delay as @bio is likely to be dispatched directly if
1551 * its @tg's disptime is not in the future.
1552 */
0e9f4164 1553 if (tg->flags & THROTL_TG_WAS_EMPTY) {
77216b04 1554 tg_update_disptime(tg);
7f52f98c 1555 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
e43473b7
VG
1556 }
1557
bc16a4f9 1558out_unlock:
e43473b7 1559 spin_unlock_irq(q->queue_lock);
2a7f1244
TH
1560out_unlock_rcu:
1561 rcu_read_unlock();
bc16a4f9 1562out:
2a0f61e6
TH
1563 /*
1564 * As multiple blk-throtls may stack in the same issue path, we
1565 * don't want bios to leave with the flag set. Clear the flag if
1566 * being issued.
1567 */
1568 if (!throttled)
1569 bio->bi_rw &= ~REQ_THROTTLED;
bc16a4f9 1570 return throttled;
e43473b7
VG
1571}
1572
2a12f0dc
TH
1573/*
1574 * Dispatch all bios from all children tg's queued on @parent_sq. On
1575 * return, @parent_sq is guaranteed to not have any active children tg's
1576 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
1577 */
1578static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1579{
1580 struct throtl_grp *tg;
1581
1582 while ((tg = throtl_rb_first(parent_sq))) {
1583 struct throtl_service_queue *sq = &tg->service_queue;
1584 struct bio *bio;
1585
1586 throtl_dequeue_tg(tg);
1587
c5cc2070 1588 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2a12f0dc 1589 tg_dispatch_one_bio(tg, bio_data_dir(bio));
c5cc2070 1590 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2a12f0dc
TH
1591 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1592 }
1593}
1594
c9a929dd
TH
1595/**
1596 * blk_throtl_drain - drain throttled bios
1597 * @q: request_queue to drain throttled bios for
1598 *
1599 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1600 */
1601void blk_throtl_drain(struct request_queue *q)
1602 __releases(q->queue_lock) __acquires(q->queue_lock)
1603{
1604 struct throtl_data *td = q->td;
2a12f0dc
TH
1605 struct blkcg_gq *blkg;
1606 struct cgroup *pos_cgrp;
c9a929dd 1607 struct bio *bio;
651930bc 1608 int rw;
c9a929dd 1609
8bcb6c7d 1610 queue_lockdep_assert_held(q);
2a12f0dc 1611 rcu_read_lock();
c9a929dd 1612
2a12f0dc
TH
1613 /*
1614 * Drain each tg while doing post-order walk on the blkg tree, so
1615 * that all bios are propagated to td->service_queue. It'd be
1616 * better to walk service_queue tree directly but blkg walk is
1617 * easier.
1618 */
1619 blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
1620 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
73f0d49a 1621
2a12f0dc 1622 tg_drain_bios(&td_root_tg(td)->service_queue);
c9a929dd 1623
2a12f0dc
TH
1624 /* finally, transfer bios from top-level tg's into the td */
1625 tg_drain_bios(&td->service_queue);
1626
1627 rcu_read_unlock();
c9a929dd
TH
1628 spin_unlock_irq(q->queue_lock);
1629
2a12f0dc 1630 /* all bios now should be in td->service_queue, issue them */
651930bc 1631 for (rw = READ; rw <= WRITE; rw++)
c5cc2070
TH
1632 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1633 NULL)))
651930bc 1634 generic_make_request(bio);
c9a929dd
TH
1635
1636 spin_lock_irq(q->queue_lock);
1637}
1638
e43473b7
VG
1639int blk_throtl_init(struct request_queue *q)
1640{
1641 struct throtl_data *td;
a2b1693b 1642 int ret;
e43473b7
VG
1643
1644 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1645 if (!td)
1646 return -ENOMEM;
1647
69df0ab0 1648 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
77216b04 1649 throtl_service_queue_init(&td->service_queue, NULL);
e43473b7 1650
cd1604fa 1651 q->td = td;
29b12589 1652 td->queue = q;
02977e4a 1653
a2b1693b 1654 /* activate policy */
3c798398 1655 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
a2b1693b 1656 if (ret)
f51b802c 1657 kfree(td);
a2b1693b 1658 return ret;
e43473b7
VG
1659}
1660
1661void blk_throtl_exit(struct request_queue *q)
1662{
c875f4d0 1663 BUG_ON(!q->td);
da527770 1664 throtl_shutdown_wq(q);
3c798398 1665 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
c9a929dd 1666 kfree(q->td);
e43473b7
VG
1667}
1668
1669static int __init throtl_init(void)
1670{
450adcbe
VG
1671 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1672 if (!kthrotld_workqueue)
1673 panic("Failed to create kthrotld\n");
1674
3c798398 1675 return blkcg_policy_register(&blkcg_policy_throtl);
e43473b7
VG
1676}
1677
1678module_init(throtl_init);