block: reorganize queue draining
[linux-2.6-block.git] / block / blk-throttle.c
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;

	struct rcu_head rcu_head;
};

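/*
 * Note: the two-element arrays in struct throtl_grp above are indexed by
 * bio direction, READ (0) and WRITE (1), matching bio_data_dir().
 */
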
struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);

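/*
 * For reference, THROTL_TG_FNS(on_rr) above expands to three helpers
 * (a sketch of the expansion, not extra code):
 *
 *	static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
 *	{ (tg)->flags |= (1 << THROTL_TG_FLAG_on_rr); }
 *	static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
 *	{ (tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr); }
 *	static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
 *	{ return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0; }
 */
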
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in an rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}

/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time as queue was being instantiated
	 * and driver had not attached a device yet.
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without queue lock held. Here the queue lock will be
 * taken rarely. It will be taken only once during the life time of a
 * group, if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_init_add_tg_lists(struct throtl_data *td,
			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
	__throtl_tg_fill_dev_details(td, tg);

	/* Add group onto cgroup list */
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				tg->blkg.dev, BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
}

/* Should be called without queue lock and outside of rcu period */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;
	int ret;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;

	ret = blkio_alloc_blkg_stats(&tg->blkg);

	if (ret) {
		kfree(tg);
		return NULL;
	}

	throtl_init_group(tg);
	return tg;
}

static struct
throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

/*
 * This function returns with the queue lock unlocked in case of error,
 * e.g., the request queue is gone.
 */
static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL, *__tg = NULL;
	struct blkio_cgroup *blkcg;
	struct request_queue *q = td->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		rcu_read_unlock();
		return tg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 */
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);
	/*
	 * We might have slept in group allocation. Make sure queue is not
	 * dead.
	 */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		if (tg)
			kfree(tg);

		return ERR_PTR(-ENODEV);
	}

	/* Group allocated and queue is still alive. Take the lock */
	spin_lock_irq(q->queue_lock);

	/*
	 * Initialize the new group. After sleeping, read the blkcg again.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding queue lock, free up the group.
	 */
	__tg = throtl_find_tg(td, blkcg);

	if (__tg) {
		kfree(tg);
		rcu_read_unlock();
		return __tg;
	}

	/* Group allocation failed. Account the IO to root group */
	if (!tg) {
		tg = td->root_tg;
		rcu_read_unlock();
		return tg;
	}

	throtl_init_add_tg_lists(td, tg, blkcg);
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

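/*
 * Worked example for the trim arithmetic above (hypothetical numbers,
 * assuming HZ=1000, so throtl_slice = HZ/10 = 100 jiffies): with
 * bps = 1048576 (1 MiB/s) and 250 jiffies elapsed since slice_start,
 * nr_slices = 250 / 100 = 2 and bytes_trim = 1048576 * 100 * 2 / 1000
 * = 209715. The budget consumed during the two fully used slices is
 * subtracted from bytes_disp and slice_start advances by 200 jiffies.
 */
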
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1. Then at max jiffy_elapsed should be equivalent to 1 second as we
	 * will allow dispatch after 1 second and after that the slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

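/*
 * Worked example (hypothetical numbers, assuming HZ=1000): with iops = 100,
 * jiffy_elapsed = 100 and jiffy_elapsed_rnd rounded up to 100 jiffies,
 * io_allowed = 100 * 100 / 1000 = 10. The 11th bio in this slice therefore
 * waits roughly (11 * 1000)/100 + 1 - 100 = 11 jiffies.
 */
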
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

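/*
 * Worked example (hypothetical numbers, assuming HZ=1000): with
 * bps = 1048576 (1 MiB/s), jiffy_elapsed_rnd = 100, bytes_disp = 100000
 * and a 64 KiB bio, bytes_allowed = 1048576 * 100 / 1000 = 104857, so
 * extra_bytes = 100000 + 65536 - 104857 = 60679 and the bio waits about
 * div64_u64(60679 * 1000, 1048576) = 57 jiffies plus the rounding slack
 * (jiffy_elapsed_rnd - jiffy_elapsed).
 */
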
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

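/*
 * With the default throtl_grp_quantum of 8 above, max_nr_reads = 8*3/4 = 6
 * and max_nr_writes = 2, which is where the 75%/25% read/write split in
 * throtl_dispatch_tg() comes from.
 */
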
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

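/*
 * A dispatch round above is bounded by throtl_quantum (32 bios total);
 * with the per-group cap of throtl_grp_quantum (8), one round drains at
 * most four fully backlogged groups before the queue lock is released.
 */
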
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * the tg too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we
 * are under rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock, which means blkg is
 * valid and in turn key is valid. The queue exit path can not race
 * because of blkcg_lock.
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group.
	 */

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (IS_ERR(tg)) {
		if (PTR_ERR(tg) == -ENODEV) {
			/*
			 * Queue is gone. No queue lock held here.
			 */
			return -ENODEV;
		}
	}

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

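/*
 * Usage sketch (caller side, as implied by the contract above): the block
 * layer core calls blk_throtl_bio(q, &bio) before queueing a bio. If the
 * bio got throttled, *biop is set to NULL and the caller must not submit
 * it; the dispatch worker later resubmits it via generic_make_request()
 * with REQ_THROTTLED set, so it passes straight through on re-entry.
 */
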
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* Alloc and init root group. */
	td->queue = q;
	tg = throtl_alloc_tg(td);

	if (!tg) {
		kfree(td);
		return -ENOMEM;
	}

	td->root_tg = tg;

	rcu_read_lock();
	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if somebody updated limits through the cgroup
	 * after the previous flush and another work got queued, cancel it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);