/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued. When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * that a local or child group which can queue many bios at once fills up
 * the list and starves the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from. When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's. A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios_bps;	/* queued bios for bps limit */
	struct bio_list		bios_iops;	/* queued bios for iops limit */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
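
/*
 * Illustrative sketch (not part of this header): one way a dispatcher could
 * pop bios from the qnodes queued on a service_queue in round-robin order.
 * The real implementation lives in blk-throttle.c; the helper below and its
 * details are simplified assumptions, not the kernel's actual code.
 *
 *	static struct bio *example_pop_queued(struct list_head *queued)
 *	{
 *		struct throtl_qnode *qn;
 *		struct bio *bio;
 *
 *		if (list_empty(queued))
 *			return NULL;
 *		qn = list_first_entry(queued, struct throtl_qnode, node);
 *		bio = bio_list_pop(&qn->bios_iops);
 *		if (!bio)
 *			bio = bio_list_pop(&qn->bios_bps);
 *		// rotate this qnode to the tail so other sources get a turn
 *		list_move_tail(&qn->node, queued);
 *		return bio;
 *	}
 */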

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued_bps[2];	/* number of queued bps bios */
	unsigned int		nr_queued_iops[2];	/* number of queued iops bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* queued bio lists became non-empty */
	/*
	 * The sq's iops queue is empty, and a bio is about to be enqueued
	 * to the first qnode's bios_iops list.
	 */
	THROTL_TG_IOPS_WAS_EMPTY = 1 << 2,
	THROTL_TG_CANCELING	= 1 << 3,	/* starts to cancel bio */
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children. qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules_bps[2];
	bool has_rules_iops[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/*
	 * Number of bytes/bios dispatched in the current slice.
	 * When a new configuration is submitted while some bios are still
	 * throttled, first calculate the carryover: the amount of bytes/IOs
	 * that have already been waited for under the previous configuration.
	 * bytes_disp/io_disp are then set to the negative of that carryover
	 * and are used to calculate the wait time under the new
	 * configuration.
	 */
	int64_t bytes_disp[2];
	int io_disp[2];
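
	/*
	 * Illustrative example (made-up numbers): if, under the old limit,
	 * the group had already waited long enough to be allowed another
	 * 512KB that it has not dispatched yet, bytes_disp[rw] starts at
	 * -512KB after the configuration switch, so that credit is not
	 * charged again when computing the wait under the new limit.
	 */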

	unsigned long last_check_time;

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};

extern struct blkcg_policy blkcg_policy_throtl;

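/* return the throtl_grp embedding @pd, or NULL if @pd is NULL */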
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

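/* map a blkcg_gq to its throtl_grp via the throtl policy data */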
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
void blk_throtl_exit(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);

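/* true once blk-throtl has been set up on @q, i.e. q->td has been allocated */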
static inline bool blk_throtl_activated(struct request_queue *q)
{
	return q->td != NULL;
}

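/*
 * Decide whether @bio must take the throttling slow path
 * (__blk_throtl_bio()). On the legacy (non-default) cgroup hierarchy this
 * also accounts the bio in the per-tg rwstat counters.
 */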
static inline bool blk_should_throtl(struct bio *bio)
{
	struct throtl_grp *tg;
	int rw = bio_data_dir(bio);

	/*
	 * This is called under bio_queue_enter(), and it's synchronized with
	 * the activation of blk-throtl, which is protected by
	 * blk_mq_freeze_queue().
	 */
	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
		return false;

	tg = blkg_to_tg(bio->bi_blkg);
	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		}
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	/* iops limit is always counted */
	if (tg->has_rules_iops[rw])
		return true;

	if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
		return true;

	return false;
}

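/*
 * Throttle @bio if needed. Returns true if the bio was throttled and queued
 * by blk-throtl for later dispatch (the caller should not submit it further),
 * false if the bio may proceed immediately.
 */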
static inline bool blk_throtl_bio(struct bio *bio)
{
	if (!blk_should_throtl(bio))
		return false;

	return __blk_throtl_bio(bio);
}
#endif /* CONFIG_BLK_DEV_THROTTLING */
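
/*
 * Illustrative usage (sketch, not the exact caller in blk-core/blk-mq): in
 * the bio submission path, under bio_queue_enter():
 *
 *	if (blk_throtl_bio(bio))
 *		return;		// throttled; blk-throtl will dispatch it later
 *	// otherwise keep issuing the bio
 */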

#endif /* BLK_THROTTLE_H */