/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);
struct throtl_rb_root {
	unsigned long min_disptime;

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
/* Per-cpu group stats */
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;

	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will be unthrottled and is ready to dispatch more bios. It is used
	 * as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */

	/* Number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in the current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * Total number of undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
	return pd_to_blkg(&tg->pd);

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
	return blkg_to_tg(td->queue->root_blkg);

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\

THROTL_TG_FNS(on_rr);
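
/*
 * Note: THROTL_TG_FNS(on_rr) expands to the helpers used below:
 * throtl_mark_tg_on_rr(), throtl_clear_tg_on_rr() and throtl_tg_on_rr(),
 * which set, clear and test THROTL_TG_FLAG_on_rr in tg->flags.
 */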
#define throtl_log_tg(td, tg, fmt, args...)	do {			\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline unsigned int total_nr_queued(struct throtl_data *td)
	return td->nr_queued[0] + td->nr_queued[1];

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_nrt_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);

	stats_cpu = alloc_percpu(struct tg_stats_cpu);
		/* allocation failed, try again after some time */
		queue_delayed_work(system_nrt_wq, dwork,
				   msecs_to_jiffies(10));

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,

		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
static void throtl_pd_init(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path. Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock(&tg_stats_alloc_lock);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
	spin_unlock(&tg_stats_alloc_lock);

static void throtl_pd_exit(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	spin_lock(&tg_stats_alloc_lock);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock(&tg_stats_alloc_lock);

	free_percpu(tg->stats_cpu);

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	if (tg->stats_cpu == NULL)

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case.
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case.
	 */
	if (blkcg == &blkcg_root) {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dead(q))
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
	/* Service tree is empty */

		root->left = rb_first(&root->rb);

		return rb_entry_tg(root->left);

static void rb_erase_init(struct rb_node *n, struct rb_root *root)

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
	rb_erase_init(n, &root->rb);

static void update_min_dispatch_time(struct throtl_rb_root *st)
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);

	st->min_disptime = tg->disptime;
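
/*
 * Insert @tg into the service tree, keyed by tg->disptime (earliest dispatch
 * time first). st->left caches the leftmost (earliest deadline) node so that
 * throtl_rb_first() can quickly find the next group to dispatch.
 */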
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;

	while (*node != NULL) {
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
			node = &parent->rb_right;

		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);

static void throtl_schedule_next_dispatch(struct throtl_data *td)
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	tg->bytes_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);

/* Determine if previously allocated or extended slice is complete or not */
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
/* Trim the used slices and adjust slice start accordingly */
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	unsigned long nr_slices, time_elapsed, io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is already
	 * used up. A new slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	tmp = tg->bps[rw] * throtl_slice * nr_slices;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
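
	/*
	 * Illustrative example (numbers assumed, not from the code): with
	 * HZ=1000, throtl_slice=100 jiffies, bps=1048576 (1 MiB/s), iops=100
	 * and nr_slices=2, the expired slices correspond to roughly
	 * 1048576 * 100 * 2 / 1000 = 209715 bytes and 100 * 100 * 2 / 1000
	 * = 20 ios worth of budget, which is trimmed from bytes_disp/io_disp
	 * below.
	 */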
	if (!bytes_trim && !io_trim)

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops can
	 * be 1; then at most the jiffies elapsed should be equivalent to
	 * 1 second, as we will allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;

		io_allowed = UINT_MAX;

	if (tg->io_disp[rw] + 1 <= io_allowed) {

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
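
	/*
	 * Illustrative example (numbers assumed): with iops=10, HZ=1000 and
	 * io_disp=10, jiffy_wait = (11 * 1000)/10 + 1 = 1101 jiffies, i.e.
	 * just past the 1.1s mark at which the 11th io fits the rate; the
	 * already elapsed part of the slice is subtracted below.
	 */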
	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
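
	/*
	 * Illustrative example (numbers assumed): if the group is 524288
	 * bytes (512 KiB) over its allowance and bps=1048576 (1 MiB/s) with
	 * HZ=1000, then jiffy_wait = 524288 * 1000 / 1048576 = 500 jiffies,
	 * i.e. about half a second before the bio fits the rate.
	 */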
	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group's bio list. So one should not be calling
	 * this function with a different bio if there are other bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {

	/*
	 * If the previous slice expired, start a new one; otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);

	if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
		throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {

	max_wait = max(bps_wait, iops_wait);

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));

	throtl_enqueue_tg(td, tg);

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
		bool rw, struct bio_list *bl)
	bio = bio_list_pop(&tg->bio_lists[rw]);

	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;
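	/*
	 * REQ_THROTTLED marks the bio as already accounted for by the
	 * throttling code, so blk_throtl_bio() clears the flag and lets it
	 * pass when it is re-submitted via generic_make_request().
	 */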
	throtl_trim_slice(td, tg, rw);

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;

	/* Try to dispatch 75% READS and 25% WRITES */
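	/* With the default throtl_grp_quantum of 8, that is 6 reads and 2 writes per round. */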
	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

		if (nr_reads >= max_nr_reads)

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

		if (nr_writes >= max_nr_writes)

	return nr_reads + nr_writes;
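
/*
 * Walk the service tree in disptime order and dispatch from every group whose
 * dispatch time has passed, stopping once roughly throtl_quantum bios have
 * been collected for this round.
 */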
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

		tg = throtl_rb_first(st);

		if (time_before(jiffies, tg->disptime))

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);

		if (nr_disp >= throtl_quantum)

static void throtl_process_limit_change(struct throtl_data *td)
	struct request_queue *q = td->queue;
	struct blkcg_gq *blkg, *n;

	if (!td->limits_changed)

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (!tg->limits_changed)

		if (!xchg(&tg->limits_changed, false))

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITE. It might
		 * happen that a group's limits are dropped suddenly and we
		 * don't want to account recently dispatched IO at the new
		 * low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they are dispatched immediately.
	 */
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
void blk_throtl_work(struct work_struct *work)
	struct throtl_data *td = container_of(work, struct throtl_data,
	struct request_queue *q = td->queue;

/* Call with queue lock held */
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have work scheduled to be executed in the future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
		struct blkg_policy_data *pd, int off)
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];

	return __blkg_prfill_rwstat(sf, pd, &rwstat);

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	return __blkg_prfill_u64(sf, pd, v);

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	return __blkg_prfill_u64(sf, pd, v);

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
		struct seq_file *sf)
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

		*(u64 *)((void *)tg + cft->private) = ctx.v;
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	/* XXX: we don't need the following deferred processing */
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	throtl_schedule_delayed_work(td, 0);

	blkg_conf_finish(&ctx);

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
	return tg_set_conf(cgrp, cft, buf, true);

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
	return tg_set_conf(cgrp, cft, buf, false);
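
/*
 * The cftype table below defines the per-cgroup blkio files backing the
 * setters and printers above. Each limit file takes "<major>:<minor> <limit>"
 * input; for example, writing "8:16 1048576" to
 * blkio.throttle.read_bps_device caps reads on device 8:16 at 1 MiB/s.
 */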
static struct cftype throtl_files[] = {
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,

		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,

		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,

		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,

		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,

		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
static void throtl_shutdown_wq(struct request_queue *q)
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,

	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,
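
/**
 * blk_throtl_bio - check whether @bio needs to be throttled on @q
 * @q: request_queue the bio is submitted to
 * @bio: bio being submitted
 *
 * Returns %true if the bio was queued by the throttling code for later
 * dispatch, %false if the caller may submit it right away.
 */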
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;

	/* bio_associate_current() needs ioc, try creating */
	create_io_context(GFP_ATOMIC, q->node);

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);

	if (tg_no_rule_group(tg, rw)) {
		throtl_update_dispatch_stats(tg_to_blkg(tg),
					     bio->bi_size, bio->bi_rw);
		goto out_unlock_rcu;
	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same direction.
		 * No need to update dispatch time.
		 */
		update_disptime = false;

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);

	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);
	bio_associate_current(bio);
	throtl_add_bio_tg(q->td, tg, bio);

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;

	WARN_ON_ONCE(!queue_is_locked(q));

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);

	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
int blk_throtl_init(struct request_queue *q)
	struct throtl_data *td;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);

	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);

void blk_throtl_exit(struct request_queue *q)
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);

static int __init throtl_init(void)
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);

module_init(throtl_init);