/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
struct throtl_service_queue {
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
};

#define THROTL_SERVICE_QUEUE_INITIALIZER				\
	(struct throtl_service_queue){ .pending_tree = RB_ROOT }
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;
	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;
	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS rate limits */
	unsigned int iops[2];
	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data {
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];
	/*
	 * Total number of undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};
/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
	return pd_to_blkg(&tg->pd);

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
	return blkg_to_tg(td->queue->root_blkg);

#define throtl_log_tg(td, tg, fmt, args...)	do {			\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
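/*
 * These macros emit messages into the blktrace stream of the queue. As an
 * illustration (the cgroup path and the numbers are made up), throtl_log_tg()
 * produces lines such as:
 *
 *   throtl /grp1 [R] bio. bdisp=4096 sz=4096 bps=1048576 iodisp=1 iops=100 queued=0/0
 *
 * while throtl_log() emits the same kind of message without the group path.
 */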
/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);

	stats_cpu = alloc_percpu(struct tg_stats_cpu);
	/* allocation failed, try again after some time */
		schedule_delayed_work(dwork, msecs_to_jiffies(10));

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,

		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
static void throtl_pd_init(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->iops[WRITE] = -1;
	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but the percpu allocator can't be called from the IO path. Queue
	 * tg on tg_stats_alloc_list and allocate from a work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
static void throtl_pd_exit(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
	struct throtl_grp *tg = blkg_to_tg(blkg);

	if (tg->stats_cpu == NULL)

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
	/* This is the common case when there are no blkcgs; avoid the lookup. */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/* This is the common case when there are no blkcgs; avoid the lookup. */
	if (blkcg == &blkcg_root) {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
	/* Service tree is empty */

	if (!sq->first_pending)
		sq->first_pending = rb_first(&sq->pending_tree);

	if (sq->first_pending)
		return rb_entry_tg(sq->first_pending);

static void rb_erase_init(struct rb_node *n, struct rb_root *root)

static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
	if (sq->first_pending == n)
		sq->first_pending = NULL;
	rb_erase_init(n, &sq->pending_tree);

static void update_min_dispatch_time(struct throtl_service_queue *sq)
	struct throtl_grp *tg;

	tg = throtl_rb_first(sq);

	sq->first_pending_disptime = tg->disptime;
static void tg_service_queue_add(struct throtl_service_queue *sq,
				 struct throtl_grp *tg)
	struct rb_node **node = &sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;

	while (*node != NULL) {
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
			node = &parent->rb_right;

		sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &sq->pending_tree);
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
	struct throtl_service_queue *sq = &td->service_queue;

	tg_service_queue_add(sq, tg);
	tg->flags |= THROTL_TG_PENDING;

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(td, tg);

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
	throtl_rb_erase(&tg->rb_node, &td->service_queue);
	tg->flags &= ~THROTL_TG_PENDING;

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(td, tg);

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
	struct delayed_work *dwork = &td->dispatch_work;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
static void throtl_schedule_next_dispatch(struct throtl_data *td)
	struct throtl_service_queue *sq = &td->service_queue;

	/* any pending children left? */

	update_min_dispatch_time(sq);

	if (time_before_eq(sq->first_pending_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	tg->bytes_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
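	/*
	 * Note on the rounding above: roundup(jiffy_end, throtl_slice)
	 * aligns the slice end to the next throtl_slice boundary. For
	 * illustration, assuming HZ == 1000 (so throtl_slice == 100
	 * jiffies), a jiffy_end of 4250 gives a slice_end of 4300, i.e.
	 * slices always end on a 100ms boundary.
	 */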
/* Determine if previously allocated or extended slice is complete or not */
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
/* Trim the used slices and adjust slice start accordingly */
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
	unsigned long nr_slices, time_elapsed, io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice has already
	 * been used up; a new slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A bogusly
	 * high slice_end is bad because it does not allow a new slice to
	 * start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	tmp = tg->bps[rw] * throtl_slice * nr_slices;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
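	/*
	 * Worked example for the trim math above (assuming HZ == 1000, so
	 * throtl_slice == 100 jiffies): with bps == 1048576 (1 MiB/s),
	 * iops == 100 and 250 jiffies elapsed since slice_start, nr_slices
	 * is 2, so bytes_trim == 1048576 * 100 * 2 / 1000 == 209715 and
	 * io_trim == 100 * 100 * 2 / 1000 == 20. That much budget is
	 * subtracted from bytes_disp/io_disp and slice_start moves forward
	 * by 200 jiffies, so only the most recent partial slice stays
	 * accounted.
	 */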
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: since the minimum
	 * iops can be 1, at most the equivalent of 1 second can elapse
	 * before we allow a dispatch, and after that the slice should have
	 * been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;

		io_allowed = UINT_MAX;

	if (tg->io_disp[rw] + 1 <= io_allowed) {

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
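	/*
	 * Worked example for the iops check above (assuming HZ == 1000 and
	 * iops == 10): if 150 jiffies have elapsed in the slice,
	 * jiffy_elapsed_rnd is rounded up to 200, so io_allowed is
	 * 10 * 200 / 1000 == 2. With io_disp already at 2 the next bio does
	 * not fit; jiffy_wait == ((2 + 1) * 1000) / 10 + 1 == 301, and after
	 * subtracting the 150 jiffies already elapsed the caller is told to
	 * wait roughly 151 jiffies (~151ms).
	 */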
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	/*
	 * This wait time does not take into account the rounding up we did
	 * above. Add that time as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
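	/*
	 * Worked example for the bps check above (assuming HZ == 1000 and
	 * bps == 1048576, i.e. 1 MiB/s): with 50 jiffies elapsed,
	 * jiffy_elapsed_rnd is rounded up to 100, so bytes_allowed is
	 * 1048576 * 100 / 1000 == 104857. If bytes_disp == 100000 and the
	 * bio is 8192 bytes, extra_bytes == 3335 and jiffy_wait is
	 * 3335 * 1000 / 1048576 == 3; adding back the 50 jiffies of rounding
	 * gives a wait of roughly 53 jiffies.
	 */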
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {

	/*
	 * If the previous slice expired, start a new one; otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {

	max_wait = max(bps_wait, iops_wait);

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
	/*
	 * Disable interrupts to provide mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit.
	 * Not optimizing that case yet.
	 */
	local_irq_save(flags);
	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	throtl_enqueue_tg(td, tg);

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
	bio = bio_list_pop(&tg->bio_lists[rw]);

	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

		if (nr_reads >= max_nr_reads)

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

		if (nr_writes >= max_nr_writes)

	return nr_reads + nr_writes;
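	/*
	 * With the default throtl_grp_quantum of 8, max_nr_reads works out
	 * to 6 and max_nr_writes to 2, so a single pass over a group
	 * dispatches at most six READs and two WRITEs before other groups
	 * get a turn.
	 */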
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq = &td->service_queue;

		tg = throtl_rb_first(sq);

		if (time_before(jiffies, tg->disptime))

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1])
			tg_update_disptime(td, tg);

		if (nr_disp >= throtl_quantum)
/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
	struct throtl_data *td = container_of(to_delayed_work(work),
					      struct throtl_data, dispatch_work);
	struct request_queue *q = td->queue;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
		   td->nr_queued[READ] + td->nr_queued[WRITE],
		   td->nr_queued[READ], td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);
	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they are issued immediately.
	 */
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	return __blkg_prfill_u64(sf, pd, v);

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	return __blkg_prfill_u64(sf, pd, v);

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

		*(u64 *)((void *)tg + cft->private) = ctx.v;
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		      tg->bps[READ], tg->bps[WRITE],
		      tg->iops[READ], tg->iops[WRITE]);
	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits were dropped suddenly and we don't want to
	 * account recently dispatched IO against the new low rate.
	 */
	throtl_start_new_slice(td, tg, 0);
	throtl_start_new_slice(td, tg, 1);
	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);

	blkg_conf_finish(&ctx);
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
	return tg_set_conf(cgrp, cft, buf, true);

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
	return tg_set_conf(cgrp, cft, buf, false);
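/*
 * The control files declared below are written in "<major>:<minor> <limit>"
 * form, which blkg_conf_prep() parses into a blkg and a value. A sketch of
 * typical usage from userspace (the device numbers and the cgroup mount
 * point are assumptions for illustration only):
 *
 *   echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *   echo "8:16 100"     > /sys/fs/cgroup/blkio/grp1/blkio.throttle.write_iops_device
 */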
static struct cftype throtl_files[] = {
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,

		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,

		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,

		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,

		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,

		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
static void throtl_shutdown_wq(struct request_queue *q)
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->dispatch_work);
static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same direction.
		 * No need to update the dispatch time.
		 */
		update_disptime = false;
	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(q->td, tg, bio);

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
	struct throtl_data *td = q->td;
	struct throtl_service_queue *sq = &td->service_queue;
	struct throtl_grp *tg;

	queue_lockdep_assert_held(q);

	while ((tg = throtl_rb_first(sq))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);

	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
int blk_throtl_init(struct request_queue *q)
	struct throtl_data *td;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);

	td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
void blk_throtl_exit(struct request_queue *q)
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
static int __init throtl_init(void)
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);

module_init(throtl_init);