2 * Interface for controlling IO bandwidth on a request queue
4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/bio.h>
11 #include <linux/blktrace_api.h>
12 #include "blk-cgroup.h"
15 /* Max dispatch from a group in 1 round */
16 static int throtl_grp_quantum = 8;
18 /* Total max dispatch from all groups in one round */
19 static int throtl_quantum = 32;
21 /* Throttling is performed over a 100ms slice and after that the slice is renewed */
22 static unsigned long throtl_slice = HZ/10; /* 100 ms */
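/*
 * For illustration: with HZ=1000 this works out to 100 jiffies, with
 * HZ=250 to 25 jiffies; either way the slice spans 100ms of wall time.
 */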
24 /* A workqueue to queue throttle related work */
25 static struct workqueue_struct *kthrotld_workqueue;
26 static void throtl_schedule_delayed_work(struct throtl_data *td,
29 struct throtl_rb_root {
33 unsigned long min_disptime;
36 #define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
37 .count = 0, .min_disptime = 0}
39 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
42 /* List of throtl groups on the request queue */
43 struct hlist_node tg_node;
45 /* active throtl group service_tree member */
46 struct rb_node rb_node;
49 * Dispatch time in jiffies. This is the estimated time when the group
50 * will be unthrottled and ready to dispatch more bios. It is used as the
51 * key to sort active groups in the service tree.
53 unsigned long disptime;
55 struct blkio_group blkg;
59 /* Two lists for READ and WRITE */
60 struct bio_list bio_lists[2];
62 /* Number of queued bios on READ and WRITE lists */
63 unsigned int nr_queued[2];
65 /* bytes per second rate limits */
71 /* Number of bytes dispatched in current slice */
72 uint64_t bytes_disp[2];
73 /* Number of bios dispatched in current slice */
74 unsigned int io_disp[2];
76 /* When did we start a new slice */
77 unsigned long slice_start[2];
78 unsigned long slice_end[2];
80 /* Some throttle limits got updated for the group */
83 struct rcu_head rcu_head;
88 /* List of throtl groups */
89 struct hlist_head tg_list;
91 /* service tree for active throtl groups */
92 struct throtl_rb_root tg_service_tree;
94 struct throtl_grp *root_tg;
95 struct request_queue *queue;
97 /* Total number of queued bios on READ and WRITE lists */
98 unsigned int nr_queued[2];
101 * number of total undestroyed groups
103 unsigned int nr_undestroyed_grps;
105 /* Work for dispatching throttled bios */
106 struct delayed_work throtl_work;
111 enum tg_state_flags {
112 THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
115 #define THROTL_TG_FNS(name) \
116 static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
118 (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
120 static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
122 (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
124 static inline int throtl_tg_##name(const struct throtl_grp *tg) \
126 return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
129 THROTL_TG_FNS(on_rr);
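/*
 * For reference, the single THROTL_TG_FNS(on_rr) instantiation above
 * expands to three helpers:
 *
 *   throtl_mark_tg_on_rr(tg)  -> tg->flags |=  (1 << THROTL_TG_FLAG_on_rr)
 *   throtl_clear_tg_on_rr(tg) -> tg->flags &= ~(1 << THROTL_TG_FLAG_on_rr)
 *   throtl_tg_on_rr(tg)       -> tests whether the on_rr bit is set
 */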
131 #define throtl_log_tg(td, tg, fmt, args...) \
132 blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
133 blkg_path(&(tg)->blkg), ##args); \
135 #define throtl_log(td, fmt, args...) \
136 blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
138 static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
141 return container_of(blkg, struct throtl_grp, blkg);
146 static inline unsigned int total_nr_queued(struct throtl_data *td)
148 return td->nr_queued[0] + td->nr_queued[1];
151 static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
153 atomic_inc(&tg->ref);
157 static void throtl_free_tg(struct rcu_head *head)
159 struct throtl_grp *tg;
161 tg = container_of(head, struct throtl_grp, rcu_head);
162 free_percpu(tg->blkg.stats_cpu);
166 static void throtl_put_tg(struct throtl_grp *tg)
168 BUG_ON(atomic_read(&tg->ref) <= 0);
169 if (!atomic_dec_and_test(&tg->ref))
173 * A group is freed in an RCU manner. But holding the RCU read lock does
174 * not mean that one can access all the fields of blkg and assume they
175 * are valid. For example, don't try to follow throtl_data and
176 * request queue links.
178 * Having a reference to blkg under rcu allows access only to
179 * values local to the group, like group stats and group rate limits.
181 call_rcu(&tg->rcu_head, throtl_free_tg);
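/*
 * A minimal sketch of the reader side described above (illustrative;
 * assumes the caller only needs group-local data such as rate limits):
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	tg = throtl_find_tg(td, blkcg);
 *	if (tg)
 *		bps = tg->bps[READ];	(group-local field, safe under rcu)
 *	rcu_read_unlock();
 *
 * Following throtl_data or request queue links from here is exactly what
 * the comment above warns against.
 */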
184 static void throtl_init_group(struct throtl_grp *tg)
186 INIT_HLIST_NODE(&tg->tg_node);
187 RB_CLEAR_NODE(&tg->rb_node);
188 bio_list_init(&tg->bio_lists[0]);
189 bio_list_init(&tg->bio_lists[1]);
190 tg->limits_changed = false;
192 /* Practically unlimited BW */
193 tg->bps[0] = tg->bps[1] = -1;
194 tg->iops[0] = tg->iops[1] = -1;
197 * Take the initial reference that will be released on destroy
198 * This can be thought of as a joint reference by cgroup and
199 * request queue which will be dropped by either request queue
200 * exit or cgroup deletion path depending on who is exiting first.
202 atomic_set(&tg->ref, 1);
205 /* Should be called with rcu read lock held (needed for blkcg) */
207 throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
209 hlist_add_head(&tg->tg_node, &td->tg_list);
210 td->nr_undestroyed_grps++;
214 __throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
216 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
217 unsigned int major, minor;
219 if (!tg || tg->blkg.dev)
223 * Fill in device details for a group which might not have been
224 * filled at group creation time, as the queue was being instantiated
225 * and the driver had not attached a device yet.
227 if (bdi->dev && dev_name(bdi->dev)) {
228 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
229 tg->blkg.dev = MKDEV(major, minor);
234 * Should be called without queue lock held. Here the queue lock will be
235 * taken rarely. It will be taken only once during the lifetime of a group.
239 throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
241 if (!tg || tg->blkg.dev)
244 spin_lock_irq(td->queue->queue_lock);
245 __throtl_tg_fill_dev_details(td, tg);
246 spin_unlock_irq(td->queue->queue_lock);
249 static void throtl_init_add_tg_lists(struct throtl_data *td,
250 struct throtl_grp *tg, struct blkio_cgroup *blkcg)
252 __throtl_tg_fill_dev_details(td, tg);
254 /* Add group onto cgroup list */
255 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
256 tg->blkg.dev, BLKIO_POLICY_THROTL);
258 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
259 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
260 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
261 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
263 throtl_add_group_to_td_list(td, tg);
266 /* Should be called without queue lock and outside of rcu period */
267 static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
269 struct throtl_grp *tg = NULL;
272 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
276 ret = blkio_alloc_blkg_stats(&tg->blkg);
283 throtl_init_group(tg);
288 throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
290 struct throtl_grp *tg = NULL;
294 * This is the common case when there are no blkio cgroups.
295 * Avoid lookup in this case
297 if (blkcg == &blkio_root_cgroup)
300 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
302 __throtl_tg_fill_dev_details(td, tg);
306 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
308 struct throtl_grp *tg = NULL, *__tg = NULL;
309 struct blkio_cgroup *blkcg;
310 struct request_queue *q = td->queue;
313 blkcg = task_blkio_cgroup(current);
314 tg = throtl_find_tg(td, blkcg);
321 * Need to allocate a group. Allocation of a group also needs allocation
322 * of per-cpu stats, which in turn takes a mutex and can block. Hence
323 * we need to drop the rcu lock and queue_lock before we call alloc.
326 spin_unlock_irq(q->queue_lock);
328 tg = throtl_alloc_tg(td);
330 /* Group allocated and queue is still alive. take the lock */
331 spin_lock_irq(q->queue_lock);
333 /* Make sure @q is still alive */
334 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
340 * Initialize the new group. After sleeping, read the blkcg again.
343 blkcg = task_blkio_cgroup(current);
346 * If some other thread already allocated the group while we were
347 * not holding queue lock, free up the group
349 __tg = throtl_find_tg(td, blkcg);
357 /* Group allocation failed. Account the IO to root group */
363 throtl_init_add_tg_lists(td, tg, blkcg);
368 static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
370 /* Service tree is empty */
375 root->left = rb_first(&root->rb);
378 return rb_entry_tg(root->left);
383 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
389 static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
393 rb_erase_init(n, &root->rb);
397 static void update_min_dispatch_time(struct throtl_rb_root *st)
399 struct throtl_grp *tg;
401 tg = throtl_rb_first(st);
405 st->min_disptime = tg->disptime;
409 tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
411 struct rb_node **node = &st->rb.rb_node;
412 struct rb_node *parent = NULL;
413 struct throtl_grp *__tg;
414 unsigned long key = tg->disptime;
417 while (*node != NULL) {
419 __tg = rb_entry_tg(parent);
421 if (time_before(key, __tg->disptime))
422 node = &parent->rb_left;
424 node = &parent->rb_right;
430 st->left = &tg->rb_node;
432 rb_link_node(&tg->rb_node, parent, node);
433 rb_insert_color(&tg->rb_node, &st->rb);
436 static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
438 struct throtl_rb_root *st = &td->tg_service_tree;
440 tg_service_tree_add(st, tg);
441 throtl_mark_tg_on_rr(tg);
445 static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
447 if (!throtl_tg_on_rr(tg))
448 __throtl_enqueue_tg(td, tg);
451 static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
453 throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
454 throtl_clear_tg_on_rr(tg);
457 static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
459 if (throtl_tg_on_rr(tg))
460 __throtl_dequeue_tg(td, tg);
463 static void throtl_schedule_next_dispatch(struct throtl_data *td)
465 struct throtl_rb_root *st = &td->tg_service_tree;
468 * If there are more bios pending, schedule more work.
470 if (!total_nr_queued(td))
475 update_min_dispatch_time(st);
477 if (time_before_eq(st->min_disptime, jiffies))
478 throtl_schedule_delayed_work(td, 0);
480 throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
484 throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
486 tg->bytes_disp[rw] = 0;
488 tg->slice_start[rw] = jiffies;
489 tg->slice_end[rw] = jiffies + throtl_slice;
490 throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
491 rw == READ ? 'R' : 'W', tg->slice_start[rw],
492 tg->slice_end[rw], jiffies);
495 static inline void throtl_set_slice_end(struct throtl_data *td,
496 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
498 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
501 static inline void throtl_extend_slice(struct throtl_data *td,
502 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
504 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
505 throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
506 rw == READ ? 'R' : 'W', tg->slice_start[rw],
507 tg->slice_end[rw], jiffies);
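/*
 * Worked example for the roundup above (illustrative; assumes HZ=1000,
 * throtl_slice=100): extending to jiffy_end=1034 gives
 * slice_end = roundup(1034, 100) = 1100, i.e. the slice always ends on a
 * throtl_slice boundary.
 */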
510 /* Determine if previously allocated or extended slice is complete or not */
512 throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
514 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
520 /* Trim the used slices and adjust slice start accordingly */
522 throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
524 unsigned long nr_slices, time_elapsed, io_trim;
527 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
530 * If bps are unlimited (-1), then time slices don't get
531 * renewed. Don't try to trim the slice if the slice has already expired.
532 * A new slice will start when appropriate.
534 if (throtl_slice_used(td, tg, rw))
538 * A bio has been dispatched. Also adjust slice_end. It might happen
539 * that initially the cgroup limit was very low, resulting in a high
540 * slice_end, but later the limit was bumped up and the bio was dispatched
541 * sooner; then we need to reduce slice_end. A high bogus slice_end
542 * is bad because it does not allow a new slice to start.
545 throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
547 time_elapsed = jiffies - tg->slice_start[rw];
549 nr_slices = time_elapsed / throtl_slice;
553 tmp = tg->bps[rw] * throtl_slice * nr_slices;
557 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
559 if (!bytes_trim && !io_trim)
562 if (tg->bytes_disp[rw] >= bytes_trim)
563 tg->bytes_disp[rw] -= bytes_trim;
565 tg->bytes_disp[rw] = 0;
567 if (tg->io_disp[rw] >= io_trim)
568 tg->io_disp[rw] -= io_trim;
572 tg->slice_start[rw] += nr_slices * throtl_slice;
574 throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
575 " start=%lu end=%lu jiffies=%lu",
576 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
577 tg->slice_start[rw], tg->slice_end[rw], jiffies);
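/*
 * Worked trim example (illustrative; assumes HZ=1000, throtl_slice=100 and
 * that the elided lines divide the byte count by HZ, as the iops line above
 * does): with bps=1048576 (1MB/s), iops=100 and time_elapsed=250 jiffies,
 * nr_slices = 250/100 = 2, bytes_trim = 1048576*100*2/1000 = 209715,
 * io_trim = 100*100*2/1000 = 20, and slice_start advances by 200 jiffies.
 */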
580 static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
581 struct bio *bio, unsigned long *wait)
583 bool rw = bio_data_dir(bio);
584 unsigned int io_allowed;
585 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
588 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
590 /* Slice has just started. Consider one slice interval */
592 jiffy_elapsed_rnd = throtl_slice;
594 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
597 * jiffy_elapsed_rnd should not be a big value: since the minimum iops can
598 * be 1, at most the elapsed jiffies should be equivalent to 1 second, as we
599 * will allow dispatch after 1 second and after that the slice should have been trimmed.
603 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
607 io_allowed = UINT_MAX;
611 if (tg->io_disp[rw] + 1 <= io_allowed) {
617 /* Calc approx time to dispatch */
618 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
620 if (jiffy_wait > jiffy_elapsed)
621 jiffy_wait = jiffy_wait - jiffy_elapsed;
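/*
 * Worked iops example (illustrative; assumes HZ=1000, throtl_slice=100 and
 * that the elided lines divide tmp by HZ): with iops=10 and a slice that
 * just started, jiffy_elapsed_rnd=100 so io_allowed = 10*100/1000 = 1.
 * If io_disp is already 1, the next bio does not fit and
 * jiffy_wait = (2*HZ)/10 + 1 = 201 jiffies (~200ms), which is roughly when
 * a second IO falls within the 10 iops budget.
 */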
630 static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
631 struct bio *bio, unsigned long *wait)
633 bool rw = bio_data_dir(bio);
634 u64 bytes_allowed, extra_bytes, tmp;
635 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
637 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
639 /* Slice has just started. Consider one slice interval */
641 jiffy_elapsed_rnd = throtl_slice;
643 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
645 tmp = tg->bps[rw] * jiffy_elapsed_rnd;
649 if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
655 /* Calc approx time to dispatch */
656 extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
657 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
663 * This wait time does not take into consideration the rounding
664 * up we did. Add that time as well.
666 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
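/*
 * Worked bps example (illustrative; assumes HZ=1000, throtl_slice=100 and
 * that the elided lines divide tmp by HZ): with bps=1048576 (1MB/s) and a
 * fresh slice, bytes_allowed = 1048576*100/1000 = 104857. A 262144-byte bio
 * with bytes_disp=0 leaves extra_bytes = 157287, so
 * jiffy_wait = 157287*HZ/1048576 = 150 jiffies (~150ms) before it fits.
 */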
672 static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
673 if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
679 * Returns whether one can dispatch a bio or not. Also returns approx number
680 * of jiffies to wait before this bio is within the IO rate and can be dispatched
682 static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
683 struct bio *bio, unsigned long *wait)
685 bool rw = bio_data_dir(bio);
686 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
689 * Currently the whole state machine of the group depends on the first bio
690 * queued in the group's bio list. So one should not be calling
691 * this function with a different bio if there are other bios queued.
694 BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
696 /* If tg->bps = -1, then BW is unlimited */
697 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
704 * If the previous slice expired, start a new one, otherwise renew/extend the
705 * existing slice to make sure it is at least throtl_slice interval long.
708 if (throtl_slice_used(td, tg, rw))
709 throtl_start_new_slice(td, tg, rw);
711 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
712 throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
715 if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
716 && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
722 max_wait = max(bps_wait, iops_wait);
727 if (time_before(tg->slice_end[rw], jiffies + max_wait))
728 throtl_extend_slice(td, tg, rw, jiffies + max_wait);
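/*
 * Tying the two checks together: if the bps check asks for, say, a
 * 150-jiffy wait and the iops check for 201 jiffies, max_wait above is 201,
 * so the bio is dispatchable only once both budgets allow it.
 */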
733 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
735 bool rw = bio_data_dir(bio);
736 bool sync = rw_is_sync(bio->bi_rw);
738 /* Charge the bio to the group */
739 tg->bytes_disp[rw] += bio->bi_size;
742 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
745 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
748 bool rw = bio_data_dir(bio);
750 bio_list_add(&tg->bio_lists[rw], bio);
751 /* Take a bio reference on tg */
752 throtl_ref_get_tg(tg);
755 throtl_enqueue_tg(td, tg);
758 static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
760 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
763 if ((bio = bio_list_peek(&tg->bio_lists[READ])))
764 tg_may_dispatch(td, tg, bio, &read_wait);
766 if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
767 tg_may_dispatch(td, tg, bio, &write_wait);
769 min_wait = min(read_wait, write_wait);
770 disptime = jiffies + min_wait;
772 /* Update dispatch time */
773 throtl_dequeue_tg(td, tg);
774 tg->disptime = disptime;
775 throtl_enqueue_tg(td, tg);
778 static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
779 bool rw, struct bio_list *bl)
783 bio = bio_list_pop(&tg->bio_lists[rw]);
785 /* Drop bio reference on tg */
788 BUG_ON(td->nr_queued[rw] <= 0);
791 throtl_charge_bio(tg, bio);
792 bio_list_add(bl, bio);
793 bio->bi_rw |= REQ_THROTTLED;
795 throtl_trim_slice(td, tg, rw);
798 static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
801 unsigned int nr_reads = 0, nr_writes = 0;
802 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
803 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
806 /* Try to dispatch 75% READS and 25% WRITES */
808 while ((bio = bio_list_peek(&tg->bio_lists[READ]))
809 && tg_may_dispatch(td, tg, bio, NULL)) {
811 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
814 if (nr_reads >= max_nr_reads)
818 while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
819 && tg_may_dispatch(td, tg, bio, NULL)) {
821 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
824 if (nr_writes >= max_nr_writes)
828 return nr_reads + nr_writes;
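/*
 * With the default throtl_grp_quantum of 8 the split above works out to
 * max_nr_reads = 8*3/4 = 6 and max_nr_writes = 8 - 6 = 2 per round, i.e.
 * the intended 75%/25% read/write mix.
 */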
831 static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
833 unsigned int nr_disp = 0;
834 struct throtl_grp *tg;
835 struct throtl_rb_root *st = &td->tg_service_tree;
838 tg = throtl_rb_first(st);
843 if (time_before(jiffies, tg->disptime))
846 throtl_dequeue_tg(td, tg);
848 nr_disp += throtl_dispatch_tg(td, tg, bl);
850 if (tg->nr_queued[0] || tg->nr_queued[1]) {
851 tg_update_disptime(td, tg);
852 throtl_enqueue_tg(td, tg);
855 if (nr_disp >= throtl_quantum)
862 static void throtl_process_limit_change(struct throtl_data *td)
864 struct throtl_grp *tg;
865 struct hlist_node *pos, *n;
867 if (!td->limits_changed)
870 xchg(&td->limits_changed, false);
872 throtl_log(td, "limits changed");
874 hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
875 if (!tg->limits_changed)
878 if (!xchg(&tg->limits_changed, false))
881 throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
882 " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
883 tg->iops[READ], tg->iops[WRITE]);
886 * Restart the slices for both READ and WRITE. It
887 * might happen that a group's limits are dropped
888 * suddenly and we don't want to account recently
889 * dispatched IO at the new low rate.
891 throtl_start_new_slice(td, tg, 0);
892 throtl_start_new_slice(td, tg, 1);
894 if (throtl_tg_on_rr(tg))
895 tg_update_disptime(td, tg);
899 /* Dispatch throttled bios. Should be called without queue lock held. */
900 static int throtl_dispatch(struct request_queue *q)
902 struct throtl_data *td = q->td;
903 unsigned int nr_disp = 0;
904 struct bio_list bio_list_on_stack;
906 struct blk_plug plug;
908 spin_lock_irq(q->queue_lock);
910 throtl_process_limit_change(td);
912 if (!total_nr_queued(td))
915 bio_list_init(&bio_list_on_stack);
917 throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
918 total_nr_queued(td), td->nr_queued[READ],
919 td->nr_queued[WRITE]);
921 nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
924 throtl_log(td, "bios disp=%u", nr_disp);
926 throtl_schedule_next_dispatch(td);
928 spin_unlock_irq(q->queue_lock);
931 * If we dispatched some requests, unplug the queue to make sure they are dispatched immediately.
935 blk_start_plug(&plug);
936 while((bio = bio_list_pop(&bio_list_on_stack)))
937 generic_make_request(bio);
938 blk_finish_plug(&plug);
943 void blk_throtl_work(struct work_struct *work)
945 struct throtl_data *td = container_of(work, struct throtl_data,
947 struct request_queue *q = td->queue;
952 /* Call with queue lock held */
954 throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
957 struct delayed_work *dwork = &td->throtl_work;
959 /* schedule work if limits changed even if no bio is queued */
960 if (total_nr_queued(td) || td->limits_changed) {
962 * We might have work scheduled to be executed in the future.
963 * Cancel that and schedule a new one.
965 __cancel_delayed_work(dwork);
966 queue_delayed_work(kthrotld_workqueue, dwork, delay);
967 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
973 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
975 /* Something is wrong if we are trying to remove the same group twice */
976 BUG_ON(hlist_unhashed(&tg->tg_node));
978 hlist_del_init(&tg->tg_node);
981 * Put the reference taken at the time of creation so that when all
982 * queues are gone, the group can be destroyed.
985 td->nr_undestroyed_grps--;
988 static void throtl_release_tgs(struct throtl_data *td)
990 struct hlist_node *pos, *n;
991 struct throtl_grp *tg;
993 hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
995 * If the cgroup removal path got to the blk_group first and removed
996 * it from the cgroup list, then it will take care of destroying the group.
999 if (!blkiocg_del_blkio_group(&tg->blkg))
1000 throtl_destroy_tg(td, tg);
1004 static void throtl_td_free(struct throtl_data *td)
1010 * Blk cgroup controller notification saying that blkio_group object is being
1011 * delinked as the associated cgroup object is going away. That also means that
1012 * no new IO will come into this group. So get rid of this group as soon as
1013 * any pending IO in the group is finished.
1015 * This function is called under rcu_read_lock(). key is the rcu protected
1016 * pointer. That means "key" is a valid throtl_data pointer as long as we are under rcu_read_lock().
1019 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1020 * it should not be NULL, as even if the queue was going away, the cgroup deletion
1021 * path got to it first.
1023 void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
1025 unsigned long flags;
1026 struct throtl_data *td = key;
1028 spin_lock_irqsave(td->queue->queue_lock, flags);
1029 throtl_destroy_tg(td, tg_of_blkg(blkg));
1030 spin_unlock_irqrestore(td->queue->queue_lock, flags);
1033 static void throtl_update_blkio_group_common(struct throtl_data *td,
1034 struct throtl_grp *tg)
1036 xchg(&tg->limits_changed, true);
1037 xchg(&td->limits_changed, true);
1038 /* Schedule work now to process the limit change */
1039 throtl_schedule_delayed_work(td, 0);
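/*
 * The xchg() above pairs with throtl_process_limit_change(): the update
 * side sets tg->limits_changed and td->limits_changed to true and kicks the
 * worker, while the dispatch side atomically tests-and-clears the flags
 * before logging and picking up the new bps/iops values.
 */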
1043 * For all update functions, key should be a valid pointer because these
1044 * update functions are called under blkcg_lock, which means blkg is
1045 * valid and in turn key is valid. The queue exit path cannot race because of blkcg_lock.
1048 * Cannot take the queue lock in update functions, as taking queue lock under blkcg_lock
1049 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
1051 static void throtl_update_blkio_group_read_bps(void *key,
1052 struct blkio_group *blkg, u64 read_bps)
1054 struct throtl_data *td = key;
1055 struct throtl_grp *tg = tg_of_blkg(blkg);
1057 tg->bps[READ] = read_bps;
1058 throtl_update_blkio_group_common(td, tg);
1061 static void throtl_update_blkio_group_write_bps(void *key,
1062 struct blkio_group *blkg, u64 write_bps)
1064 struct throtl_data *td = key;
1065 struct throtl_grp *tg = tg_of_blkg(blkg);
1067 tg->bps[WRITE] = write_bps;
1068 throtl_update_blkio_group_common(td, tg);
1071 static void throtl_update_blkio_group_read_iops(void *key,
1072 struct blkio_group *blkg, unsigned int read_iops)
1074 struct throtl_data *td = key;
1075 struct throtl_grp *tg = tg_of_blkg(blkg);
1077 tg->iops[READ] = read_iops;
1078 throtl_update_blkio_group_common(td, tg);
1081 static void throtl_update_blkio_group_write_iops(void *key,
1082 struct blkio_group *blkg, unsigned int write_iops)
1084 struct throtl_data *td = key;
1085 struct throtl_grp *tg = tg_of_blkg(blkg);
1087 tg->iops[WRITE] = write_iops;
1088 throtl_update_blkio_group_common(td, tg);
1091 static void throtl_shutdown_wq(struct request_queue *q)
1093 struct throtl_data *td = q->td;
1095 cancel_delayed_work_sync(&td->throtl_work);
1098 static struct blkio_policy_type blkio_policy_throtl = {
1100 .blkio_unlink_group_fn = throtl_unlink_blkio_group,
1101 .blkio_update_group_read_bps_fn =
1102 throtl_update_blkio_group_read_bps,
1103 .blkio_update_group_write_bps_fn =
1104 throtl_update_blkio_group_write_bps,
1105 .blkio_update_group_read_iops_fn =
1106 throtl_update_blkio_group_read_iops,
1107 .blkio_update_group_write_iops_fn =
1108 throtl_update_blkio_group_write_iops,
1110 .plid = BLKIO_POLICY_THROTL,
1113 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1115 struct throtl_data *td = q->td;
1116 struct throtl_grp *tg;
1117 bool rw = bio_data_dir(bio), update_disptime = true;
1118 struct blkio_cgroup *blkcg;
1119 bool throttled = false;
1121 if (bio->bi_rw & REQ_THROTTLED) {
1122 bio->bi_rw &= ~REQ_THROTTLED;
1127 * A throtl_grp pointer retrieved under rcu can be used to access
1128 * basic fields like stats and io rates. If a group has no rules,
1129 * just update the dispatch stats in a lockless manner and return.
1133 blkcg = task_blkio_cgroup(current);
1134 tg = throtl_find_tg(td, blkcg);
1136 throtl_tg_fill_dev_details(td, tg);
1138 if (tg_no_rule_group(tg, rw)) {
1139 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1140 rw, rw_is_sync(bio->bi_rw));
1148 * Either the group has not been allocated yet or it is not an unlimited BW group.
1151 spin_lock_irq(q->queue_lock);
1152 tg = throtl_get_tg(td);
1156 if (tg->nr_queued[rw]) {
1158 * There is already another bio queued in the same direction. No
1159 * need to update dispatch time.
1161 update_disptime = false;
1166 /* Bio is within the rate limit of the group */
1167 if (tg_may_dispatch(td, tg, bio, NULL)) {
1168 throtl_charge_bio(tg, bio);
1171 * We need to trim the slice even when bios are not being queued,
1172 * otherwise it might happen that a bio is not queued for
1173 * a long time and the slice keeps on extending and trim is not
1174 * called for a long time. Now if limits are reduced suddenly
1175 * we take into account all the IO dispatched so far at the new
1176 * low rate and newly queued IO gets a really long dispatch time.
1179 * So keep on trimming the slice even if a bio is not queued.
1181 throtl_trim_slice(td, tg, rw);
1186 throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1187 " iodisp=%u iops=%u queued=%d/%d",
1188 rw == READ ? 'R' : 'W',
1189 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1190 tg->io_disp[rw], tg->iops[rw],
1191 tg->nr_queued[READ], tg->nr_queued[WRITE]);
1193 throtl_add_bio_tg(q->td, tg, bio);
1196 if (update_disptime) {
1197 tg_update_disptime(td, tg);
1198 throtl_schedule_next_dispatch(td);
1202 spin_unlock_irq(q->queue_lock);
1207 int blk_throtl_init(struct request_queue *q)
1209 struct throtl_data *td;
1210 struct throtl_grp *tg;
1212 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1216 INIT_HLIST_HEAD(&td->tg_list);
1217 td->tg_service_tree = THROTL_RB_ROOT;
1218 td->limits_changed = false;
1219 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1221 /* Alloc and init the root group. */
1223 tg = throtl_alloc_tg(td);
1233 throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
1236 /* Attach throtl data to request queue */
1241 void blk_throtl_exit(struct request_queue *q)
1243 struct throtl_data *td = q->td;
1248 throtl_shutdown_wq(q);
1250 spin_lock_irq(q->queue_lock);
1251 throtl_release_tgs(td);
1253 /* If there are other groups */
1254 if (td->nr_undestroyed_grps > 0)
1257 spin_unlock_irq(q->queue_lock);
1260 * Wait for tg->blkg->key accessors to exit their grace periods.
1261 * Do this wait only if there are other undestroyed groups out
1262 * there (other than the root group). This can happen if the cgroup deletion
1263 * path claimed the responsibility of cleaning up a group before
1264 * the queue cleanup code got to the group.
1266 * Do not call synchronize_rcu() unconditionally as there are drivers
1267 * which create/delete request queues hundreds of times during scan/boot
1268 * and synchronize_rcu() can take significant time and slow down boot.
1274 * Just being safe: make sure that after the previous flush, if somebody did
1275 * update limits through cgroup and another work got queued, we cancel it.
1278 throtl_shutdown_wq(q);
1282 static int __init throtl_init(void)
1284 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1285 if (!kthrotld_workqueue)
1286 panic("Failed to create kthrotld\n");
1288 blkio_policy_register(&blkio_policy_throtl);
1292 module_init(throtl_init);