1 // SPDX-License-Identifier: GPL-2.0
3 * Interface for controlling IO bandwidth on a request queue
5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
14 #include "blk-cgroup-rwstat.h"
16 #include "blk-throttle.h"
18 /* Max dispatch from a group in 1 round */
19 #define THROTL_GRP_QUANTUM 8
21 /* Total max dispatch from all groups in one round */
22 #define THROTL_QUANTUM 32
24 /* Throttling is performed over a time slice; after it expires the slice is renewed */
25 #define DFL_THROTL_SLICE_HD (HZ / 10)
26 #define DFL_THROTL_SLICE_SSD (HZ / 50)
27 #define MAX_THROTL_SLICE (HZ)
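/*
 * Illustrative numbers (assuming HZ == 1000): DFL_THROTL_SLICE_HD is 100
 * jiffies (~100ms) and DFL_THROTL_SLICE_SSD is 20 jiffies (~20ms), so with
 * e.g. a 1MB/s bps limit a single HD slice admits roughly 100KB before
 * further bios have to wait for the slice to be renewed.
 */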
29 /* A workqueue to queue throttle related work */
30 static struct workqueue_struct *kthrotld_workqueue;
32 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
36 /* service tree for active throtl groups */
37 struct throtl_service_queue service_queue;
39 struct request_queue *queue;
41 /* Total Number of queued bios on READ and WRITE lists */
42 unsigned int nr_queued[2];
44 unsigned int throtl_slice;
46 /* Work for dispatching throttled bios */
47 struct work_struct dispatch_work;
49 bool track_bio_latency;
52 static void throtl_pending_timer_fn(struct timer_list *t);
54 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
56 return pd_to_blkg(&tg->pd);
60 * sq_to_tg - return the throtl_grp the specified service queue belongs to
61 * @sq: the throtl_service_queue of interest
63 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
64 * embedded in throtl_data, %NULL is returned.
66 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
68 if (sq && sq->parent_sq)
69 return container_of(sq, struct throtl_grp, service_queue);
75 * sq_to_td - return throtl_data the specified service queue belongs to
76 * @sq: the throtl_service_queue of interest
78 * A service_queue can be embedded in either a throtl_grp or throtl_data.
79 * Determine the associated throtl_data accordingly and return it.
81 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
83 struct throtl_grp *tg = sq_to_tg(sq);
88 return container_of(sq, struct throtl_data, service_queue);
91 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
93 struct blkcg_gq *blkg = tg_to_blkg(tg);
95 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
101 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
103 struct blkcg_gq *blkg = tg_to_blkg(tg);
105 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
112 * throtl_log - log debug message via blktrace
113 * @sq: the service_queue being reported
114 * @fmt: printf format string
117 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
118 * throtl_grp; otherwise, just "throtl".
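 *
 * Example usage (from the dispatch path below):
 *	throtl_log(sq, "bios disp=%u", ret);
 * which, roughly, shows up in the blktrace stream as
 * "throtl <blkg name> bios disp=N" when @sq belongs to a throtl_grp, or as
 * "throtl bios disp=N" for the top-level service queue.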
120 #define throtl_log(sq, fmt, args...) do { \
121 struct throtl_grp *__tg = sq_to_tg((sq)); \
122 struct throtl_data *__td = sq_to_td((sq)); \
125 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
128 blk_add_cgroup_trace_msg(__td->queue, \
129 &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
131 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
135 static inline unsigned int throtl_bio_data_size(struct bio *bio)
137 /* assume it's one sector */
138 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
140 return bio->bi_iter.bi_size;
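/*
 * Note: a discard is charged as a single 512-byte sector rather than its
 * full length, so bps accounting is not dominated by huge discard bios.
 */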
143 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
145 INIT_LIST_HEAD(&qn->node);
146 bio_list_init(&qn->bios);
151 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
152 * @bio: bio being added
153 * @qn: qnode to add bio to
154 * @queued: the service_queue->queued[] list @qn belongs to
156 * Add @bio to @qn and put @qn on @queued if it's not already on.
157 * @qn->tg's reference count is bumped when @qn is activated. See the
158 * comment on top of throtl_qnode definition for details.
160 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
161 struct list_head *queued)
163 bio_list_add(&qn->bios, bio);
164 if (list_empty(&qn->node)) {
165 list_add_tail(&qn->node, queued);
166 blkg_get(tg_to_blkg(qn->tg));
171 * throtl_peek_queued - peek the first bio on a qnode list
172 * @queued: the qnode list to peek
174 static struct bio *throtl_peek_queued(struct list_head *queued)
176 struct throtl_qnode *qn;
179 if (list_empty(queued))
182 qn = list_first_entry(queued, struct throtl_qnode, node);
183 bio = bio_list_peek(&qn->bios);
189 * throtl_pop_queued - pop the first bio from a qnode list
190 * @queued: the qnode list to pop a bio from
191 * @tg_to_put: optional out argument for throtl_grp to put
193 * Pop the first bio from the qnode list @queued. After popping, the first
194 * qnode is removed from @queued if empty or moved to the end of @queued so
195 * that the popping order is round-robin.
197 * When the first qnode is removed, its associated throtl_grp should be put
198 * too. If @tg_to_put is NULL, this function automatically puts it;
199 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
200 * responsible for putting it.
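 *
 * For example, if @queued holds qnodes [A, B, C], the bio is popped from A;
 * if A still has bios left it is rotated to the tail giving [B, C, A], so the
 * next pop services B. If A became empty it is removed (and its throtl_grp
 * reference dropped as described above).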
202 static struct bio *throtl_pop_queued(struct list_head *queued,
203 struct throtl_grp **tg_to_put)
205 struct throtl_qnode *qn;
208 if (list_empty(queued))
211 qn = list_first_entry(queued, struct throtl_qnode, node);
212 bio = bio_list_pop(&qn->bios);
215 if (bio_list_empty(&qn->bios)) {
216 list_del_init(&qn->node);
220 blkg_put(tg_to_blkg(qn->tg));
222 list_move_tail(&qn->node, queued);
228 /* init a service_queue, assumes the caller zeroed it */
229 static void throtl_service_queue_init(struct throtl_service_queue *sq)
231 INIT_LIST_HEAD(&sq->queued[READ]);
232 INIT_LIST_HEAD(&sq->queued[WRITE]);
233 sq->pending_tree = RB_ROOT_CACHED;
234 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
237 static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
238 struct blkcg *blkcg, gfp_t gfp)
240 struct throtl_grp *tg;
243 tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
247 if (blkg_rwstat_init(&tg->stat_bytes, gfp))
250 if (blkg_rwstat_init(&tg->stat_ios, gfp))
251 goto err_exit_stat_bytes;
253 throtl_service_queue_init(&tg->service_queue);
255 for (rw = READ; rw <= WRITE; rw++) {
256 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
257 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
260 RB_CLEAR_NODE(&tg->rb_node);
261 tg->bps[READ] = U64_MAX;
262 tg->bps[WRITE] = U64_MAX;
263 tg->iops[READ] = UINT_MAX;
264 tg->iops[WRITE] = UINT_MAX;
269 blkg_rwstat_exit(&tg->stat_bytes);
275 static void throtl_pd_init(struct blkg_policy_data *pd)
277 struct throtl_grp *tg = pd_to_tg(pd);
278 struct blkcg_gq *blkg = tg_to_blkg(tg);
279 struct throtl_data *td = blkg->q->td;
280 struct throtl_service_queue *sq = &tg->service_queue;
283 * If on the default hierarchy, we switch to properly hierarchical
284 * behavior where limits on a given throtl_grp are applied to the
285 * whole subtree rather than just the group itself. e.g. If 16M
286 * read_bps limit is set on a parent group, the combined bps of the
287 * parent group and its subtree groups can't exceed 16M for the device.
290 * If not on the default hierarchy, the broken flat hierarchy
291 * behavior is retained where all throtl_grps are treated as if
292 * they're all separate root groups right below throtl_data.
293 * Limits of a group don't interact with limits of other groups
294 * regardless of the position of the group in the hierarchy.
296 sq->parent_sq = &td->service_queue;
297 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
298 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
303 * Set has_rules[] if @tg or any of its parents have limits configured.
304 * This doesn't require walking up to the top of the hierarchy as the
305 * parent's has_rules[] is guaranteed to be correct.
307 static void tg_update_has_rules(struct throtl_grp *tg)
309 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
312 for (rw = READ; rw <= WRITE; rw++) {
313 tg->has_rules_iops[rw] =
314 (parent_tg && parent_tg->has_rules_iops[rw]) ||
315 tg_iops_limit(tg, rw) != UINT_MAX;
316 tg->has_rules_bps[rw] =
317 (parent_tg && parent_tg->has_rules_bps[rw]) ||
318 tg_bps_limit(tg, rw) != U64_MAX;
322 static void throtl_pd_online(struct blkg_policy_data *pd)
324 struct throtl_grp *tg = pd_to_tg(pd);
326 * We don't want new groups to escape the limits of their ancestors.
327 * Update has_rules[] after a new group is brought online.
329 tg_update_has_rules(tg);
332 static void throtl_pd_free(struct blkg_policy_data *pd)
334 struct throtl_grp *tg = pd_to_tg(pd);
336 del_timer_sync(&tg->service_queue.pending_timer);
337 blkg_rwstat_exit(&tg->stat_bytes);
338 blkg_rwstat_exit(&tg->stat_ios);
342 static struct throtl_grp *
343 throtl_rb_first(struct throtl_service_queue *parent_sq)
347 n = rb_first_cached(&parent_sq->pending_tree);
351 return rb_entry_tg(n);
354 static void throtl_rb_erase(struct rb_node *n,
355 struct throtl_service_queue *parent_sq)
357 rb_erase_cached(n, &parent_sq->pending_tree);
361 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
363 struct throtl_grp *tg;
365 tg = throtl_rb_first(parent_sq);
369 parent_sq->first_pending_disptime = tg->disptime;
372 static void tg_service_queue_add(struct throtl_grp *tg)
374 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
375 struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
376 struct rb_node *parent = NULL;
377 struct throtl_grp *__tg;
378 unsigned long key = tg->disptime;
379 bool leftmost = true;
381 while (*node != NULL) {
383 __tg = rb_entry_tg(parent);
385 if (time_before(key, __tg->disptime))
386 node = &parent->rb_left;
388 node = &parent->rb_right;
393 rb_link_node(&tg->rb_node, parent, node);
394 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
398 static void throtl_enqueue_tg(struct throtl_grp *tg)
400 if (!(tg->flags & THROTL_TG_PENDING)) {
401 tg_service_queue_add(tg);
402 tg->flags |= THROTL_TG_PENDING;
403 tg->service_queue.parent_sq->nr_pending++;
407 static void throtl_dequeue_tg(struct throtl_grp *tg)
409 if (tg->flags & THROTL_TG_PENDING) {
410 struct throtl_service_queue *parent_sq =
411 tg->service_queue.parent_sq;
413 throtl_rb_erase(&tg->rb_node, parent_sq);
414 --parent_sq->nr_pending;
415 tg->flags &= ~THROTL_TG_PENDING;
419 /* Call with queue lock held */
420 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
421 unsigned long expires)
423 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
426 * Since we are adjusting the throttle limit dynamically, the sleep
427 * time calculated according to previous limit might be invalid. It's
428 * possible the cgroup sleep time is very long and no other cgroups
429 * have IO running, so a limit change could otherwise go unnoticed. Make
430 * sure the cgroup doesn't sleep too long and miss that notification.
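 *
 * With the default HD slice of HZ / 10 the 8 * throtl_slice clamp above
 * limits the timer to roughly 800ms in the future (illustrative, assuming
 * HZ == 1000 and the default slice length).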
432 if (time_after(expires, max_expire))
433 expires = max_expire;
434 mod_timer(&sq->pending_timer, expires);
435 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
436 expires - jiffies, jiffies);
440 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
441 * @sq: the service_queue to schedule dispatch for
442 * @force: force scheduling
444 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
445 * dispatch time of the first pending child. Returns %true if either timer
446 * is armed or there's no pending child left. %false if the current
447 * dispatch window is still open and the caller should continue dispatching.
450 * If @force is %true, the dispatch timer is always scheduled and this
451 * function is guaranteed to return %true. This is to be used when the
452 * caller can't dispatch itself and needs to invoke pending_timer
453 * unconditionally. Note that forced scheduling is likely to induce short
454 * delay before dispatch starts even if @sq->first_pending_disptime is not
455 * in the future and thus shouldn't be used in hot paths.
457 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
460 /* any pending children left? */
464 update_min_dispatch_time(sq);
466 /* is the next dispatch time in the future? */
467 if (force || time_after(sq->first_pending_disptime, jiffies)) {
468 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
472 /* tell the caller to continue dispatching */
476 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
477 bool rw, unsigned long start)
479 tg->bytes_disp[rw] = 0;
481 tg->carryover_bytes[rw] = 0;
482 tg->carryover_ios[rw] = 0;
485 * Previous slice has expired. We must have trimmed it after last
486 * bio dispatch. That means since start of last slice, we never used
487 * that bandwidth. Do try to make use of that bandwidth while giving credit.
490 if (time_after(start, tg->slice_start[rw]))
491 tg->slice_start[rw] = start;
493 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
494 throtl_log(&tg->service_queue,
495 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
496 rw == READ ? 'R' : 'W', tg->slice_start[rw],
497 tg->slice_end[rw], jiffies);
500 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
501 bool clear_carryover)
503 tg->bytes_disp[rw] = 0;
505 tg->slice_start[rw] = jiffies;
506 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
507 if (clear_carryover) {
508 tg->carryover_bytes[rw] = 0;
509 tg->carryover_ios[rw] = 0;
512 throtl_log(&tg->service_queue,
513 "[%c] new slice start=%lu end=%lu jiffies=%lu",
514 rw == READ ? 'R' : 'W', tg->slice_start[rw],
515 tg->slice_end[rw], jiffies);
518 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
519 unsigned long jiffy_end)
521 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
524 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
525 unsigned long jiffy_end)
527 throtl_set_slice_end(tg, rw, jiffy_end);
528 throtl_log(&tg->service_queue,
529 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
530 rw == READ ? 'R' : 'W', tg->slice_start[rw],
531 tg->slice_end[rw], jiffies);
534 /* Determine if previously allocated or extended slice is complete or not */
535 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
537 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
543 static unsigned int calculate_io_allowed(u32 iops_limit,
544 unsigned long jiffy_elapsed)
546 unsigned int io_allowed;
550 * jiffy_elapsed should not be a big value: the minimum iops can be 1, so
551 * at most jiffy_elapsed should be about 1 second, as we allow a dispatch
552 * after 1 second and by then the slice should have been trimmed.
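 *
 * Illustrative math: the allowed IO count scales as
 * iops_limit * jiffy_elapsed / HZ, so with iops_limit == 1000 and a tenth
 * of a second elapsed roughly 100 IOs are allowed since slice_start.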
556 tmp = (u64)iops_limit * jiffy_elapsed;
560 io_allowed = UINT_MAX;
567 static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
570 * Can result be wider than 64 bits?
571 * We check against 62, not 64, due to ilog2 truncation.
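 * ilog2() rounds down, so ilog2(bps_limit) + ilog2(jiffy_elapsed) can
 * understate the width of the true product by up to 2 bits; comparing
 * against 62 instead of 64 keeps that margin, guaranteeing the u64 result
 * below cannot overflow when the check passes.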
573 if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
575 return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
578 /* Trim the used slices and adjust slice start accordingly */
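/*
 * Illustrative trim (made-up numbers, assuming throtl_slice == HZ / 10 and a
 * 1MB/s bps limit): if 2.5 slices have elapsed since slice_start, the elapsed
 * time is rounded down to 2 whole slices, roughly 200KB (and the matching IO
 * count) is subtracted from bytes_disp/io_disp, and slice_start is advanced
 * by those 2 slices so only the current partial slice stays accounted.
 */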
579 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
581 unsigned long time_elapsed;
582 long long bytes_trim;
585 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
588 * If bps is unlimited (-1), then the time slice doesn't get
589 * renewed. Don't try to trim the slice if the slice has already been
590 * used up. A new slice will start when appropriate.
592 if (throtl_slice_used(tg, rw))
596 * A bio has been dispatched. Also adjust slice_end. It might happen
597 * that initially the cgroup limit was very low, resulting in a high
598 * slice_end, but the limit was later bumped up and the bio was dispatched
599 * sooner; then we need to reduce slice_end. A high bogus slice_end
600 * is bad because it does not allow a new slice to start.
603 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
605 time_elapsed = rounddown(jiffies - tg->slice_start[rw],
606 tg->td->throtl_slice);
610 bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
612 tg->carryover_bytes[rw];
613 io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
614 tg->carryover_ios[rw];
615 if (bytes_trim <= 0 && io_trim <= 0)
618 tg->carryover_bytes[rw] = 0;
619 if ((long long)tg->bytes_disp[rw] >= bytes_trim)
620 tg->bytes_disp[rw] -= bytes_trim;
622 tg->bytes_disp[rw] = 0;
624 tg->carryover_ios[rw] = 0;
625 if ((int)tg->io_disp[rw] >= io_trim)
626 tg->io_disp[rw] -= io_trim;
630 tg->slice_start[rw] += time_elapsed;
632 throtl_log(&tg->service_queue,
633 "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
634 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
635 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
639 static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
641 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
642 u64 bps_limit = tg_bps_limit(tg, rw);
643 u32 iops_limit = tg_iops_limit(tg, rw);
646 * If the config is updated while bios are still throttled, calculate and
647 * accumulate how many bytes/ios have been waited for across the change;
648 * carryover_bytes/ios is then used to calculate the wait time under the new configuration.
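 *
 * Roughly: the bytes/ios the old limit would have allowed so far minus what
 * was actually dispatched is accumulated here, so budget already earned (or
 * debt already incurred) under the old limit is not lost when the slice is
 * restarted under the new configuration.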
651 if (bps_limit != U64_MAX)
652 tg->carryover_bytes[rw] +=
653 calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
655 if (iops_limit != UINT_MAX)
656 tg->carryover_ios[rw] +=
657 calculate_io_allowed(iops_limit, jiffy_elapsed) -
661 static void tg_update_carryover(struct throtl_grp *tg)
663 if (tg->service_queue.nr_queued[READ])
664 __tg_update_carryover(tg, READ);
665 if (tg->service_queue.nr_queued[WRITE])
666 __tg_update_carryover(tg, WRITE);
668 /* see comments in struct throtl_grp for meaning of these fields. */
669 throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
670 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
671 tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
674 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
677 bool rw = bio_data_dir(bio);
679 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
681 if (iops_limit == UINT_MAX) {
685 jiffy_elapsed = jiffies - tg->slice_start[rw];
687 /* Round up to the next throttle slice, wait time must be nonzero */
688 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
689 io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
690 tg->carryover_ios[rw];
691 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
694 /* Calc approx time to dispatch */
695 jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
697 /* make sure at least one io can be dispatched after waiting */
698 jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
702 static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
705 bool rw = bio_data_dir(bio);
706 long long bytes_allowed;
708 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
709 unsigned int bio_size = throtl_bio_data_size(bio);
711 /* no need to throttle if this bio's bytes have been accounted */
712 if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
716 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
718 /* Slice has just started. Consider one slice interval */
720 jiffy_elapsed_rnd = tg->td->throtl_slice;
722 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
723 bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
724 tg->carryover_bytes[rw];
725 if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
728 /* Calc approx time to dispatch */
729 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
730 jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
736 * This wait time does not take into account the rounding up we did
737 * above. Add that time as well.
739 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
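/*
 * Worked example for tg_within_bps_limit() (illustrative, assuming HZ == 1000,
 * zero carryover and a 1MB/s limit): if one 100ms slice allows ~102KB, 90KB
 * are already charged and a 64KB bio arrives, the bio overshoots by ~52KB and
 * jiffy_wait comes out to about extra_bytes * HZ / bps_limit =~ 50 jiffies,
 * plus whatever the rounding of jiffy_elapsed up to a slice boundary adds.
 */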
744 * Returns whether one can dispatch a bio or not. Also returns the approximate
745 * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
747 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
750 bool rw = bio_data_dir(bio);
751 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
752 u64 bps_limit = tg_bps_limit(tg, rw);
753 u32 iops_limit = tg_iops_limit(tg, rw);
756 * Currently the whole state machine of the group depends on the first bio
757 * queued in the group's bio list. So one should not be calling
758 * this function with a different bio if there are other bios queued.
761 BUG_ON(tg->service_queue.nr_queued[rw] &&
762 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
764 /* If tg->bps = -1, then BW is unlimited */
765 if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
766 tg->flags & THROTL_TG_CANCELING) {
773 * If previous slice expired, start a new one otherwise renew/extend
774 * existing slice to make sure it is at least throtl_slice interval
775 * long since now. A new slice is started only for an empty throttle group.
776 * If there is a queued bio, that means there should be an active
777 * slice and it should be extended instead.
779 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
780 throtl_start_new_slice(tg, rw, true);
782 if (time_before(tg->slice_end[rw],
783 jiffies + tg->td->throtl_slice))
784 throtl_extend_slice(tg, rw,
785 jiffies + tg->td->throtl_slice);
788 bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
789 iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
790 if (bps_wait + iops_wait == 0) {
796 max_wait = max(bps_wait, iops_wait);
801 if (time_before(tg->slice_end[rw], jiffies + max_wait))
802 throtl_extend_slice(tg, rw, jiffies + max_wait);
807 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
809 bool rw = bio_data_dir(bio);
810 unsigned int bio_size = throtl_bio_data_size(bio);
812 /* Charge the bio to the group */
813 if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
814 tg->bytes_disp[rw] += bio_size;
815 tg->last_bytes_disp[rw] += bio_size;
819 tg->last_io_disp[rw]++;
823 * throtl_add_bio_tg - add a bio to the specified throtl_grp
826 * @tg: the target throtl_grp
828 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
829 * tg->qnode_on_self[] is used.
831 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
832 struct throtl_grp *tg)
834 struct throtl_service_queue *sq = &tg->service_queue;
835 bool rw = bio_data_dir(bio);
838 qn = &tg->qnode_on_self[rw];
841 * If @tg doesn't currently have any bios queued in the same
842 * direction, queueing @bio can change when @tg should be
843 * dispatched. Mark that @tg was empty. This is automatically
844 * cleared on the next tg_update_disptime().
846 if (!sq->nr_queued[rw])
847 tg->flags |= THROTL_TG_WAS_EMPTY;
849 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
852 throtl_enqueue_tg(tg);
855 static void tg_update_disptime(struct throtl_grp *tg)
857 struct throtl_service_queue *sq = &tg->service_queue;
858 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
861 bio = throtl_peek_queued(&sq->queued[READ]);
863 tg_may_dispatch(tg, bio, &read_wait);
865 bio = throtl_peek_queued(&sq->queued[WRITE]);
867 tg_may_dispatch(tg, bio, &write_wait);
869 min_wait = min(read_wait, write_wait);
870 disptime = jiffies + min_wait;
872 /* Update dispatch time */
873 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
874 tg->disptime = disptime;
875 tg_service_queue_add(tg);
877 /* see throtl_add_bio_tg() */
878 tg->flags &= ~THROTL_TG_WAS_EMPTY;
881 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
882 struct throtl_grp *parent_tg, bool rw)
884 if (throtl_slice_used(parent_tg, rw)) {
885 throtl_start_new_slice_with_credit(parent_tg, rw,
886 child_tg->slice_start[rw]);
891 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
893 struct throtl_service_queue *sq = &tg->service_queue;
894 struct throtl_service_queue *parent_sq = sq->parent_sq;
895 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
896 struct throtl_grp *tg_to_put = NULL;
900 * @bio is being transferred from @tg to @parent_sq. Popping a bio
901 * from @tg may put its reference and @parent_sq might end up
902 * getting released prematurely. Remember the tg to put and put it
903 * after @bio is transferred to @parent_sq.
905 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
908 throtl_charge_bio(tg, bio);
911 * If our parent is another tg, we just need to transfer @bio to
912 * the parent using throtl_add_bio_tg(). If our parent is
913 * @td->service_queue, @bio is ready to be issued. Put it on its
914 * bio_lists[] and decrease total number queued. The caller is
915 * responsible for issuing these bios.
918 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
919 start_parent_slice_with_credit(tg, parent_tg, rw);
921 bio_set_flag(bio, BIO_BPS_THROTTLED);
922 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
923 &parent_sq->queued[rw]);
924 BUG_ON(tg->td->nr_queued[rw] <= 0);
925 tg->td->nr_queued[rw]--;
928 throtl_trim_slice(tg, rw);
931 blkg_put(tg_to_blkg(tg_to_put));
934 static int throtl_dispatch_tg(struct throtl_grp *tg)
936 struct throtl_service_queue *sq = &tg->service_queue;
937 unsigned int nr_reads = 0, nr_writes = 0;
938 unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
939 unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
942 /* Try to dispatch 75% READS and 25% WRITES */
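/* With THROTL_GRP_QUANTUM == 8 that is at most 6 reads and 2 writes per round. */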
944 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
945 tg_may_dispatch(tg, bio, NULL)) {
947 tg_dispatch_one_bio(tg, READ);
950 if (nr_reads >= max_nr_reads)
954 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
955 tg_may_dispatch(tg, bio, NULL)) {
957 tg_dispatch_one_bio(tg, WRITE);
960 if (nr_writes >= max_nr_writes)
964 return nr_reads + nr_writes;
967 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
969 unsigned int nr_disp = 0;
972 struct throtl_grp *tg;
973 struct throtl_service_queue *sq;
975 if (!parent_sq->nr_pending)
978 tg = throtl_rb_first(parent_sq);
982 if (time_before(jiffies, tg->disptime))
985 nr_disp += throtl_dispatch_tg(tg);
987 sq = &tg->service_queue;
988 if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
989 tg_update_disptime(tg);
991 throtl_dequeue_tg(tg);
993 if (nr_disp >= THROTL_QUANTUM)
1001 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1002 * @t: the pending_timer member of the throtl_service_queue being serviced
1004 * This timer is armed when a child throtl_grp with active bios becomes
1005 * pending and queued on the service_queue's pending_tree and expires when
1006 * the first child throtl_grp should be dispatched. This function
1007 * dispatches bios from the children throtl_grps to the parent service_queue.
1010 * If the parent's parent is another throtl_grp, dispatching is propagated
1011 * by either arming its pending_timer or repeating dispatch directly. If
1012 * the top-level service_tree is reached, throtl_data->dispatch_work is
1013 * kicked so that the ready bios are issued.
1015 static void throtl_pending_timer_fn(struct timer_list *t)
1017 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1018 struct throtl_grp *tg = sq_to_tg(sq);
1019 struct throtl_data *td = sq_to_td(sq);
1020 struct throtl_service_queue *parent_sq;
1021 struct request_queue *q;
1025 /* throtl_data may be gone, so figure out request queue by blkg */
1031 spin_lock_irq(&q->queue_lock);
1037 parent_sq = sq->parent_sq;
1041 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1042 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1043 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1045 ret = throtl_select_dispatch(sq);
1047 throtl_log(sq, "bios disp=%u", ret);
1051 if (throtl_schedule_next_dispatch(sq, false))
1054 /* this dispatch window is still open, relax and repeat */
1055 spin_unlock_irq(&q->queue_lock);
1057 spin_lock_irq(&q->queue_lock);
1064 /* @parent_sq is another throtl_grp, propagate dispatch */
1065 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1066 tg_update_disptime(tg);
1067 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1068 /* window is already open, repeat dispatching */
1075 /* reached the top-level, queue issuing */
1076 queue_work(kthrotld_workqueue, &td->dispatch_work);
1079 spin_unlock_irq(&q->queue_lock);
1083 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1084 * @work: work item being executed
1086 * This function is queued for execution when bios reach the bio_lists[]
1087 * of throtl_data->service_queue. Those bios are ready and issued by this function.
1090 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1092 struct throtl_data *td = container_of(work, struct throtl_data,
1094 struct throtl_service_queue *td_sq = &td->service_queue;
1095 struct request_queue *q = td->queue;
1096 struct bio_list bio_list_on_stack;
1098 struct blk_plug plug;
1101 bio_list_init(&bio_list_on_stack);
1103 spin_lock_irq(&q->queue_lock);
1104 for (rw = READ; rw <= WRITE; rw++)
1105 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1106 bio_list_add(&bio_list_on_stack, bio);
1107 spin_unlock_irq(&q->queue_lock);
1109 if (!bio_list_empty(&bio_list_on_stack)) {
1110 blk_start_plug(&plug);
1111 while ((bio = bio_list_pop(&bio_list_on_stack)))
1112 submit_bio_noacct_nocheck(bio);
1113 blk_finish_plug(&plug);
1117 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1120 struct throtl_grp *tg = pd_to_tg(pd);
1121 u64 v = *(u64 *)((void *)tg + off);
1125 return __blkg_prfill_u64(sf, pd, v);
1128 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1131 struct throtl_grp *tg = pd_to_tg(pd);
1132 unsigned int v = *(unsigned int *)((void *)tg + off);
1136 return __blkg_prfill_u64(sf, pd, v);
1139 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1141 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1142 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1146 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1148 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1149 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1153 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1155 struct throtl_service_queue *sq = &tg->service_queue;
1156 struct cgroup_subsys_state *pos_css;
1157 struct blkcg_gq *blkg;
1159 throtl_log(&tg->service_queue,
1160 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1161 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1162 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1166 * Update has_rules[] flags for the updated tg's subtree. A tg is
1167 * considered to have rules if either the tg itself or any of its
1168 * ancestors has rules. This identifies groups without any
1169 * restrictions in the whole hierarchy and allows them to bypass blk-throttle.
1172 blkg_for_each_descendant_pre(blkg, pos_css,
1173 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1174 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1176 tg_update_has_rules(this_tg);
1177 /* ignore root/second level */
1178 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1179 !blkg->parent->parent)
1185 * We're already holding queue_lock and know @tg is valid. Let's
1186 * apply the new config directly.
1188 * Restart the slices for both READ and WRITE. It might happen
1189 * that a group's limits are dropped suddenly and we don't want to
1190 * account recently dispatched IO at the new low rate.
1192 throtl_start_new_slice(tg, READ, false);
1193 throtl_start_new_slice(tg, WRITE, false);
1195 if (tg->flags & THROTL_TG_PENDING) {
1196 tg_update_disptime(tg);
1197 throtl_schedule_next_dispatch(sq->parent_sq, true);
1201 static int blk_throtl_init(struct gendisk *disk)
1203 struct request_queue *q = disk->queue;
1204 struct throtl_data *td;
1207 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1211 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1212 throtl_service_queue_init(&td->service_queue);
1215 * Freeze queue before activating policy, to synchronize with IO path,
1216 * which is protected by 'q_usage_counter'.
1218 blk_mq_freeze_queue(disk->queue);
1219 blk_mq_quiesce_queue(disk->queue);
1224 /* activate policy */
1225 ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
1232 if (blk_queue_nonrot(q))
1233 td->throtl_slice = DFL_THROTL_SLICE_SSD;
1235 td->throtl_slice = DFL_THROTL_SLICE_HD;
1236 td->track_bio_latency = !queue_is_mq(q);
1237 if (!td->track_bio_latency)
1238 blk_stat_enable_accounting(q);
1241 blk_mq_unquiesce_queue(disk->queue);
1242 blk_mq_unfreeze_queue(disk->queue);
1248 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1249 char *buf, size_t nbytes, loff_t off, bool is_u64)
1251 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1252 struct blkg_conf_ctx ctx;
1253 struct throtl_grp *tg;
1257 blkg_conf_init(&ctx, buf);
1259 ret = blkg_conf_open_bdev(&ctx);
1263 if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1264 ret = blk_throtl_init(ctx.bdev->bd_disk);
1269 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1274 if (sscanf(ctx.body, "%llu", &v) != 1)
1279 tg = blkg_to_tg(ctx.blkg);
1280 tg_update_carryover(tg);
1283 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1285 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1287 tg_conf_updated(tg, false);
1290 blkg_conf_exit(&ctx);
1291 return ret ?: nbytes;
1294 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1295 char *buf, size_t nbytes, loff_t off)
1297 return tg_set_conf(of, buf, nbytes, off, true);
1300 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1301 char *buf, size_t nbytes, loff_t off)
1303 return tg_set_conf(of, buf, nbytes, off, false);
1306 static int tg_print_rwstat(struct seq_file *sf, void *v)
1308 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1309 blkg_prfill_rwstat, &blkcg_policy_throtl,
1310 seq_cft(sf)->private, true);
1314 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1315 struct blkg_policy_data *pd, int off)
1317 struct blkg_rwstat_sample sum;
1319 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1321 return __blkg_prfill_rwstat(sf, pd, &sum);
1324 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1326 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1327 tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1328 seq_cft(sf)->private, true);
1332 static struct cftype throtl_legacy_files[] = {
1334 .name = "throttle.read_bps_device",
1335 .private = offsetof(struct throtl_grp, bps[READ]),
1336 .seq_show = tg_print_conf_u64,
1337 .write = tg_set_conf_u64,
1340 .name = "throttle.write_bps_device",
1341 .private = offsetof(struct throtl_grp, bps[WRITE]),
1342 .seq_show = tg_print_conf_u64,
1343 .write = tg_set_conf_u64,
1346 .name = "throttle.read_iops_device",
1347 .private = offsetof(struct throtl_grp, iops[READ]),
1348 .seq_show = tg_print_conf_uint,
1349 .write = tg_set_conf_uint,
1352 .name = "throttle.write_iops_device",
1353 .private = offsetof(struct throtl_grp, iops[WRITE]),
1354 .seq_show = tg_print_conf_uint,
1355 .write = tg_set_conf_uint,
1358 .name = "throttle.io_service_bytes",
1359 .private = offsetof(struct throtl_grp, stat_bytes),
1360 .seq_show = tg_print_rwstat,
1363 .name = "throttle.io_service_bytes_recursive",
1364 .private = offsetof(struct throtl_grp, stat_bytes),
1365 .seq_show = tg_print_rwstat_recursive,
1368 .name = "throttle.io_serviced",
1369 .private = offsetof(struct throtl_grp, stat_ios),
1370 .seq_show = tg_print_rwstat,
1373 .name = "throttle.io_serviced_recursive",
1374 .private = offsetof(struct throtl_grp, stat_ios),
1375 .seq_show = tg_print_rwstat_recursive,
1380 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1383 struct throtl_grp *tg = pd_to_tg(pd);
1384 const char *dname = blkg_dev_name(pd->blkg);
1386 unsigned int iops_dft;
1392 iops_dft = UINT_MAX;
1394 if (tg->bps[READ] == bps_dft &&
1395 tg->bps[WRITE] == bps_dft &&
1396 tg->iops[READ] == iops_dft &&
1397 tg->iops[WRITE] == iops_dft)
1400 seq_printf(sf, "%s", dname);
1401 if (tg->bps[READ] == U64_MAX)
1402 seq_printf(sf, " rbps=max");
1404 seq_printf(sf, " rbps=%llu", tg->bps[READ]);
1406 if (tg->bps[WRITE] == U64_MAX)
1407 seq_printf(sf, " wbps=max");
1409 seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
1411 if (tg->iops[READ] == UINT_MAX)
1412 seq_printf(sf, " riops=max");
1414 seq_printf(sf, " riops=%u", tg->iops[READ]);
1416 if (tg->iops[WRITE] == UINT_MAX)
1417 seq_printf(sf, " wiops=max");
1419 seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
1421 seq_printf(sf, "\n");
1425 static int tg_print_limit(struct seq_file *sf, void *v)
1427 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1428 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1432 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1433 char *buf, size_t nbytes, loff_t off)
1435 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1436 struct blkg_conf_ctx ctx;
1437 struct throtl_grp *tg;
1441 blkg_conf_init(&ctx, buf);
1443 ret = blkg_conf_open_bdev(&ctx);
1447 if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1448 ret = blk_throtl_init(ctx.bdev->bd_disk);
1453 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1457 tg = blkg_to_tg(ctx.blkg);
1458 tg_update_carryover(tg);
1460 v[0] = tg->bps[READ];
1461 v[1] = tg->bps[WRITE];
1462 v[2] = tg->iops[READ];
1463 v[3] = tg->iops[WRITE];
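/*
 * The remaining body is parsed below as whitespace-separated "key=value"
 * tokens, e.g. a hypothetical "rbps=2097152 wiops=max" meaning 2MB/s reads
 * and unlimited write IOPS; "max" resets a limit to its default (no limit).
 */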
1466 char tok[27]; /* wiops=18446744073709551616 */
1471 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1480 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1488 if (!strcmp(tok, "rbps") && val > 1)
1490 else if (!strcmp(tok, "wbps") && val > 1)
1492 else if (!strcmp(tok, "riops") && val > 1)
1493 v[2] = min_t(u64, val, UINT_MAX);
1494 else if (!strcmp(tok, "wiops") && val > 1)
1495 v[3] = min_t(u64, val, UINT_MAX);
1500 tg->bps[READ] = v[0];
1501 tg->bps[WRITE] = v[1];
1502 tg->iops[READ] = v[2];
1503 tg->iops[WRITE] = v[3];
1505 tg_conf_updated(tg, false);
1508 blkg_conf_exit(&ctx);
1509 return ret ?: nbytes;
1512 static struct cftype throtl_files[] = {
1515 .flags = CFTYPE_NOT_ON_ROOT,
1516 .seq_show = tg_print_limit,
1517 .write = tg_set_limit,
1522 static void throtl_shutdown_wq(struct request_queue *q)
1524 struct throtl_data *td = q->td;
1526 cancel_work_sync(&td->dispatch_work);
1529 struct blkcg_policy blkcg_policy_throtl = {
1530 .dfl_cftypes = throtl_files,
1531 .legacy_cftypes = throtl_legacy_files,
1533 .pd_alloc_fn = throtl_pd_alloc,
1534 .pd_init_fn = throtl_pd_init,
1535 .pd_online_fn = throtl_pd_online,
1536 .pd_free_fn = throtl_pd_free,
1539 void blk_throtl_cancel_bios(struct gendisk *disk)
1541 struct request_queue *q = disk->queue;
1542 struct cgroup_subsys_state *pos_css;
1543 struct blkcg_gq *blkg;
1545 if (!blk_throtl_activated(q))
1548 spin_lock_irq(&q->queue_lock);
1550 * queue_lock is held, rcu lock is not needed here technically.
1551 * However, the rcu lock is still held to emphasize that the following
1552 * path needs RCU protection and to prevent a warning from lockdep.
1555 blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1556 struct throtl_grp *tg = blkg_to_tg(blkg);
1557 struct throtl_service_queue *sq = &tg->service_queue;
1560 * Set the flag to make sure throtl_pending_timer_fn() won't
1561 * stop until all throttled bios are dispatched.
1563 tg->flags |= THROTL_TG_CANCELING;
1566 * Do not dispatch a cgroup without THROTL_TG_PENDING, or the cgroup
1567 * will be inserted into the service queue without THROTL_TG_PENDING
1568 * set by tg_update_disptime() below. Then IO dispatched from a
1569 * child in tg_dispatch_one_bio() would trigger a double insertion
1570 * and corrupt the tree.
1572 if (!(tg->flags & THROTL_TG_PENDING))
1576 * Update disptime after setting the above flag to make sure
1577 * throtl_select_dispatch() won't exit without dispatching.
1579 tg_update_disptime(tg);
1581 throtl_schedule_pending_timer(sq, jiffies + 1);
1584 spin_unlock_irq(&q->queue_lock);
1587 static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
1589 /* throtl is FIFO - if bios are already queued, should queue */
1590 if (tg->service_queue.nr_queued[rw])
1593 return tg_may_dispatch(tg, bio, NULL);
1596 static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
1598 if (!bio_flagged(bio, BIO_BPS_THROTTLED))
1599 tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
1600 tg->carryover_ios[rw]--;
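/*
 * Dispatching in debt drives carryover_bytes/carryover_ios negative. Since
 * tg_within_bps_limit()/tg_within_iops_limit() add the carryover to the
 * allowed budget, the debt shrinks what later bios may dispatch and is paid
 * back over subsequent slices.
 */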
1603 bool __blk_throtl_bio(struct bio *bio)
1605 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1606 struct blkcg_gq *blkg = bio->bi_blkg;
1607 struct throtl_qnode *qn = NULL;
1608 struct throtl_grp *tg = blkg_to_tg(blkg);
1609 struct throtl_service_queue *sq;
1610 bool rw = bio_data_dir(bio);
1611 bool throttled = false;
1612 struct throtl_data *td = tg->td;
1615 spin_lock_irq(&q->queue_lock);
1616 sq = &tg->service_queue;
1619 if (tg_within_limit(tg, bio, rw)) {
1620 /* within limits, let's charge and dispatch directly */
1621 throtl_charge_bio(tg, bio);
1624 * We need to trim the slice even when bios are not being
1625 * queued; otherwise it might happen that a bio is not
1626 * queued for a long time and the slice keeps on extending
1627 * while trim is not called for a long time. If limits are
1628 * then reduced suddenly, we account all the IO dispatched so
1629 * far at the new low rate and newly queued
1630 * IO gets a really long dispatch time.
1632 * So keep on trimming the slice even if the bio is not queued.
1634 throtl_trim_slice(tg, rw);
1635 } else if (bio_issue_as_root_blkg(bio)) {
1637 * IOs which may cause priority inversions are
1638 * dispatched directly, even if they're over limit.
1639 * Debts are handled by carryover_bytes/ios while
1640 * calculating wait time.
1642 tg_dispatch_in_debt(tg, bio, rw);
1644 /* if above limits, break to queue */
1649 * @bio passed through this layer without being throttled.
1650 * Climb up the ladder. If we're already at the top, it
1651 * can be executed directly.
1653 qn = &tg->qnode_on_parent[rw];
1657 bio_set_flag(bio, BIO_BPS_THROTTLED);
1662 /* out-of-limit, queue to @tg */
1663 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1664 rw == READ ? 'R' : 'W',
1665 tg->bytes_disp[rw], bio->bi_iter.bi_size,
1666 tg_bps_limit(tg, rw),
1667 tg->io_disp[rw], tg_iops_limit(tg, rw),
1668 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1670 td->nr_queued[rw]++;
1671 throtl_add_bio_tg(bio, qn, tg);
1675 * Update @tg's dispatch time and force schedule dispatch if @tg
1676 * was empty before @bio. The forced scheduling isn't likely to
1677 * cause undue delay as @bio is likely to be dispatched directly if
1678 * its @tg's disptime is not in the future.
1680 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1681 tg_update_disptime(tg);
1682 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1686 spin_unlock_irq(&q->queue_lock);
1692 void blk_throtl_exit(struct gendisk *disk)
1694 struct request_queue *q = disk->queue;
1696 if (!blk_throtl_activated(q))
1699 del_timer_sync(&q->td->service_queue.pending_timer);
1700 throtl_shutdown_wq(q);
1701 blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
1705 static int __init throtl_init(void)
1707 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1708 if (!kthrotld_workqueue)
1709 panic("Failed to create kthrotld\n");
1711 return blkcg_policy_register(&blkcg_policy_throtl);
1714 module_init(throtl_init);