block/blk-throttle.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
e43473b7
VG
2/*
3 * Interface for controlling IO bandwidth on a request queue
4 *
5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6 */
7
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/blkdev.h>
11#include <linux/bio.h>
12#include <linux/blktrace_api.h>
eea8f41c 13#include <linux/blk-cgroup.h>
bc9fcbf9 14#include "blk.h"
e43473b7
VG
15
16/* Max dispatch from a group in 1 round */
17static int throtl_grp_quantum = 8;
18
19/* Total max dispatch from all groups in one round */
20static int throtl_quantum = 32;
21
d61fcfa4
SL
22/* Throttling is performed over a slice and after that slice is renewed */
23#define DFL_THROTL_SLICE_HD (HZ / 10)
24#define DFL_THROTL_SLICE_SSD (HZ / 50)
297e3d85 25#define MAX_THROTL_SLICE (HZ)
9e234eea 26#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
9bb67aeb
SL
27#define MIN_THROTL_BPS (320 * 1024)
28#define MIN_THROTL_IOPS (10)
b4f428ef
SL
29#define DFL_LATENCY_TARGET (-1L)
30#define DFL_IDLE_THRESHOLD (0)
6679a90c
SL
31#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
32#define LATENCY_FILTERED_SSD (0)
33/*
34 * For HD, very small latency comes from sequential IO. Such IO tells us little
35 * about whether the IO is impacted by other IO, hence we ignore it
36 */
37#define LATENCY_FILTERED_HD (1000L) /* 1ms */
e43473b7 38
3c798398 39static struct blkcg_policy blkcg_policy_throtl;
0381411e 40
450adcbe
VG
41/* A workqueue to queue throttle related work */
42static struct workqueue_struct *kthrotld_workqueue;
450adcbe 43
c5cc2070
TH
44/*
45 * To implement hierarchical throttling, throtl_grps form a tree and bios
46 * are dispatched upwards level by level until they reach the top and get
47 * issued. When dispatching bios from the children and local group at each
48 * level, if the bios are dispatched into a single bio_list, there's a risk
49 * that a local or child group which can queue many bios at once fills up
50 * the list and starves the others.
51 *
52 * To avoid such starvation, dispatched bios are queued separately
53 * according to where they came from. When they are again dispatched to
54 * the parent, they're popped in round-robin order so that no single source
55 * hogs the dispatch window.
56 *
57 * throtl_qnode is used to keep the queued bios separated by their sources.
58 * Bios are queued to throtl_qnode which in turn is queued to
59 * throtl_service_queue and then dispatched in round-robin order.
60 *
61 * It's also used to track the reference counts on blkg's. A qnode always
62 * belongs to a throtl_grp and gets queued on itself or the parent, so
63 * incrementing the reference of the associated throtl_grp when a qnode is
64 * queued and decrementing when dequeued is enough to keep the whole blkg
65 * tree pinned while bios are in flight.
66 */
67struct throtl_qnode {
68 struct list_head node; /* service_queue->queued[] */
69 struct bio_list bios; /* queued bios */
70 struct throtl_grp *tg; /* tg this qnode belongs to */
71};
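/*
 * Illustration (hypothetical child groups A and B): if both have a qnode
 * with several bios sitting on the parent's queued[] list, each call to
 * throtl_pop_queued() below takes one bio from the qnode at the head of
 * the list and, if that qnode still has bios left, rotates it to the tail.
 * The resulting pop order is A, B, A, B, ... instead of draining A first.
 */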
72
c9e0332e 73struct throtl_service_queue {
77216b04
TH
74 struct throtl_service_queue *parent_sq; /* the parent service_queue */
75
73f0d49a
TH
76 /*
77 * Bios queued directly to this service_queue or dispatched from
78 * children throtl_grp's.
79 */
c5cc2070 80 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
73f0d49a
TH
81 unsigned int nr_queued[2]; /* number of queued bios */
82
83 /*
84 * RB tree of active children throtl_grp's, which are sorted by
85 * their ->disptime.
86 */
c9e0332e
TH
87 struct rb_root pending_tree; /* RB tree of active tgs */
88 struct rb_node *first_pending; /* first node in the tree */
89 unsigned int nr_pending; /* # queued in the tree */
90 unsigned long first_pending_disptime; /* disptime of the first tg */
69df0ab0 91 struct timer_list pending_timer; /* fires on first_pending_disptime */
e43473b7
VG
92};
93
5b2c16aa
TH
94enum tg_state_flags {
95 THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
0e9f4164 96 THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
5b2c16aa
TH
97};
98
e43473b7
VG
99#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
100
9f626e37 101enum {
cd5ab1b0 102 LIMIT_LOW,
9f626e37
SL
103 LIMIT_MAX,
104 LIMIT_CNT,
105};
106
e43473b7 107struct throtl_grp {
f95a04af
TH
108 /* must be the first member */
109 struct blkg_policy_data pd;
110
c9e0332e 111 /* active throtl group service_queue member */
e43473b7
VG
112 struct rb_node rb_node;
113
0f3457f6
TH
114 /* throtl_data this group belongs to */
115 struct throtl_data *td;
116
49a2f1e3
TH
117 /* this group's service queue */
118 struct throtl_service_queue service_queue;
119
c5cc2070
TH
120 /*
121 * qnode_on_self is used when bios are directly queued to this
122 * throtl_grp so that local bios compete fairly with bios
123 * dispatched from children. qnode_on_parent is used when bios are
124 * dispatched from this throtl_grp into its parent and will compete
125 * with the sibling qnode_on_parents and the parent's
126 * qnode_on_self.
127 */
128 struct throtl_qnode qnode_on_self[2];
129 struct throtl_qnode qnode_on_parent[2];
130
e43473b7
VG
131 /*
132 * Dispatch time in jiffies. This is the estimated time when group
133 * will unthrottle and is ready to dispatch more bio. It is used as
134 * key to sort active groups in service tree.
135 */
136 unsigned long disptime;
137
e43473b7
VG
138 unsigned int flags;
139
693e751e
TH
140 /* are there any throtl rules between this group and td? */
141 bool has_rules[2];
142
cd5ab1b0 143 /* internally used bytes per second rate limits */
9f626e37 144 uint64_t bps[2][LIMIT_CNT];
cd5ab1b0
SL
145 /* user configured bps limits */
146 uint64_t bps_conf[2][LIMIT_CNT];
e43473b7 147
cd5ab1b0 148 /* internally used IOPS limits */
9f626e37 149 unsigned int iops[2][LIMIT_CNT];
cd5ab1b0
SL
150 /* user configured IOPS limits */
151 unsigned int iops_conf[2][LIMIT_CNT];
8e89d13f 152
e43473b7
VG
153 /* Number of bytes dispatched in current slice */
154 uint64_t bytes_disp[2];
8e89d13f
VG
155 /* Number of bios dispatched in current slice */
156 unsigned int io_disp[2];
e43473b7 157
3f0abd80
SL
158 unsigned long last_low_overflow_time[2];
159
160 uint64_t last_bytes_disp[2];
161 unsigned int last_io_disp[2];
162
163 unsigned long last_check_time;
164
ec80991d 165 unsigned long latency_target; /* us */
5b81fc3c 166 unsigned long latency_target_conf; /* us */
e43473b7
VG
167 /* When did we start a new slice */
168 unsigned long slice_start[2];
169 unsigned long slice_end[2];
9e234eea
SL
170
171 unsigned long last_finish_time; /* ns / 1024 */
172 unsigned long checked_last_finish_time; /* ns / 1024 */
173 unsigned long avg_idletime; /* ns / 1024 */
174 unsigned long idletime_threshold; /* us */
5b81fc3c 175 unsigned long idletime_threshold_conf; /* us */
53696b8d
SL
176
177 unsigned int bio_cnt; /* total bios */
178 unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
179 unsigned long bio_cnt_reset_time;
e43473b7
VG
180};
181
b9147dd1
SL
182/* We measure latency for request size from <= 4k to >= 1M */
183#define LATENCY_BUCKET_SIZE 9
184
185struct latency_bucket {
186 unsigned long total_latency; /* ns / 1024 */
187 int samples;
188};
189
190struct avg_latency_bucket {
191 unsigned long latency; /* ns / 1024 */
192 bool valid;
193};
194
e43473b7
VG
195struct throtl_data
196{
e43473b7 197 /* service tree for active throtl groups */
c9e0332e 198 struct throtl_service_queue service_queue;
e43473b7 199
e43473b7
VG
200 struct request_queue *queue;
201
202 /* Total Number of queued bios on READ and WRITE lists */
203 unsigned int nr_queued[2];
204
297e3d85
SL
205 unsigned int throtl_slice;
206
e43473b7 207 /* Work for dispatching throttled bios */
69df0ab0 208 struct work_struct dispatch_work;
9f626e37
SL
209 unsigned int limit_index;
210 bool limit_valid[LIMIT_CNT];
3f0abd80
SL
211
212 unsigned long low_upgrade_time;
213 unsigned long low_downgrade_time;
7394e31f
SL
214
215 unsigned int scale;
b9147dd1 216
b889bf66
JQ
217 struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
218 struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
219 struct latency_bucket __percpu *latency_buckets[2];
b9147dd1 220 unsigned long last_calculate_time;
6679a90c 221 unsigned long filtered_latency;
b9147dd1
SL
222
223 bool track_bio_latency;
e43473b7
VG
224};
225
e99e88a9 226static void throtl_pending_timer_fn(struct timer_list *t);
69df0ab0 227
f95a04af
TH
228static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
229{
230 return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
231}
232
3c798398 233static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
0381411e 234{
f95a04af 235 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
0381411e
TH
236}
237
3c798398 238static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
0381411e 239{
f95a04af 240 return pd_to_blkg(&tg->pd);
0381411e
TH
241}
242
fda6f272
TH
243/**
244 * sq_to_tg - return the throtl_grp the specified service queue belongs to
245 * @sq: the throtl_service_queue of interest
246 *
247 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
248 * embedded in throtl_data, %NULL is returned.
249 */
250static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
251{
252 if (sq && sq->parent_sq)
253 return container_of(sq, struct throtl_grp, service_queue);
254 else
255 return NULL;
256}
257
258/**
259 * sq_to_td - return throtl_data the specified service queue belongs to
260 * @sq: the throtl_service_queue of interest
261 *
b43daedc 262 * A service_queue can be embedded in either a throtl_grp or throtl_data.
fda6f272
TH
263 * Determine the associated throtl_data accordingly and return it.
264 */
265static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
266{
267 struct throtl_grp *tg = sq_to_tg(sq);
268
269 if (tg)
270 return tg->td;
271 else
272 return container_of(sq, struct throtl_data, service_queue);
273}
274
7394e31f
SL
275/*
276 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This scaling
277 * makes the IO dispatch smoother.
278 * Scale up: scale up linearly according to the time elapsed since the upgrade.
279 * For every throtl_slice, the limit scales up by 1/2 of the .low limit
280 * until it hits the .max limit.
281 * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit
282 */
283static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
284{
285 /* arbitrary value to avoid too big scale */
286 if (td->scale < 4096 && time_after_eq(jiffies,
287 td->low_upgrade_time + td->scale * td->throtl_slice))
288 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
289
290 return low + (low >> 1) * td->scale;
291}
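/*
 * Worked example (hypothetical numbers): with a .low limit of 100 MB/s and
 * three throtl_slice periods elapsed since the last upgrade, scale becomes 3
 * and throtl_adjusted_limit() returns 100 + 50 * 3 = 250 MB/s.  The callers
 * below still clamp the result to the configured .max limit.
 */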
292
9f626e37
SL
293static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
294{
b22c417c 295 struct blkcg_gq *blkg = tg_to_blkg(tg);
7394e31f 296 struct throtl_data *td;
b22c417c
SL
297 uint64_t ret;
298
299 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
300 return U64_MAX;
7394e31f
SL
301
302 td = tg->td;
303 ret = tg->bps[rw][td->limit_index];
9bb67aeb
SL
304 if (ret == 0 && td->limit_index == LIMIT_LOW) {
305 /* intermediate node or iops isn't 0 */
306 if (!list_empty(&blkg->blkcg->css.children) ||
307 tg->iops[rw][td->limit_index])
308 return U64_MAX;
309 else
310 return MIN_THROTL_BPS;
311 }
7394e31f
SL
312
313 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
314 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
315 uint64_t adjusted;
316
317 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
318 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
319 }
b22c417c 320 return ret;
9f626e37
SL
321}
322
323static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
324{
b22c417c 325 struct blkcg_gq *blkg = tg_to_blkg(tg);
7394e31f 326 struct throtl_data *td;
b22c417c
SL
327 unsigned int ret;
328
329 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
330 return UINT_MAX;
9bb67aeb 331
7394e31f
SL
332 td = tg->td;
333 ret = tg->iops[rw][td->limit_index];
9bb67aeb
SL
334 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
335 /* intermediate node or bps isn't 0 */
336 if (!list_empty(&blkg->blkcg->css.children) ||
337 tg->bps[rw][td->limit_index])
338 return UINT_MAX;
339 else
340 return MIN_THROTL_IOPS;
341 }
7394e31f
SL
342
343 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
344 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
345 uint64_t adjusted;
346
347 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
348 if (adjusted > UINT_MAX)
349 adjusted = UINT_MAX;
350 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
351 }
b22c417c 352 return ret;
9f626e37
SL
353}
354
b9147dd1
SL
355#define request_bucket_index(sectors) \
356 clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
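/*
 * For example, a 4k request (8 sectors) lands in bucket
 * order_base_2(8) - 3 = 0, while a 1M request (2048 sectors) lands in
 * order_base_2(2048) - 3 = 8, the last bucket.
 */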
357
fda6f272
TH
358/**
359 * throtl_log - log debug message via blktrace
360 * @sq: the service_queue being reported
361 * @fmt: printf format string
362 * @args: printf args
363 *
364 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
365 * throtl_grp; otherwise, just "throtl".
fda6f272
TH
366 */
367#define throtl_log(sq, fmt, args...) do { \
368 struct throtl_grp *__tg = sq_to_tg((sq)); \
369 struct throtl_data *__td = sq_to_td((sq)); \
370 \
371 (void)__td; \
59fa0224
SL
372 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
373 break; \
fda6f272 374 if ((__tg)) { \
35fe6d76
SL
375 blk_add_cgroup_trace_msg(__td->queue, \
376 tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
fda6f272
TH
377 } else { \
378 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
379 } \
54e7ed12 380} while (0)
e43473b7 381
ea0ea2bc
SL
382static inline unsigned int throtl_bio_data_size(struct bio *bio)
383{
384 /* assume it's one sector */
385 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
386 return 512;
387 return bio->bi_iter.bi_size;
388}
389
c5cc2070
TH
390static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
391{
392 INIT_LIST_HEAD(&qn->node);
393 bio_list_init(&qn->bios);
394 qn->tg = tg;
395}
396
397/**
398 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
399 * @bio: bio being added
400 * @qn: qnode to add bio to
401 * @queued: the service_queue->queued[] list @qn belongs to
402 *
403 * Add @bio to @qn and put @qn on @queued if it's not already on.
404 * @qn->tg's reference count is bumped when @qn is activated. See the
405 * comment on top of throtl_qnode definition for details.
406 */
407static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
408 struct list_head *queued)
409{
410 bio_list_add(&qn->bios, bio);
411 if (list_empty(&qn->node)) {
412 list_add_tail(&qn->node, queued);
413 blkg_get(tg_to_blkg(qn->tg));
414 }
415}
416
417/**
418 * throtl_peek_queued - peek the first bio on a qnode list
419 * @queued: the qnode list to peek
420 */
421static struct bio *throtl_peek_queued(struct list_head *queued)
422{
423 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
424 struct bio *bio;
425
426 if (list_empty(queued))
427 return NULL;
428
429 bio = bio_list_peek(&qn->bios);
430 WARN_ON_ONCE(!bio);
431 return bio;
432}
433
434/**
435 * throtl_pop_queued - pop the first bio from a qnode list
436 * @queued: the qnode list to pop a bio from
437 * @tg_to_put: optional out argument for throtl_grp to put
438 *
439 * Pop the first bio from the qnode list @queued. After popping, the first
440 * qnode is removed from @queued if empty or moved to the end of @queued so
441 * that the popping order is round-robin.
442 *
443 * When the first qnode is removed, its associated throtl_grp should be put
444 * too. If @tg_to_put is NULL, this function automatically puts it;
445 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
446 * responsible for putting it.
447 */
448static struct bio *throtl_pop_queued(struct list_head *queued,
449 struct throtl_grp **tg_to_put)
450{
451 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
452 struct bio *bio;
453
454 if (list_empty(queued))
455 return NULL;
456
457 bio = bio_list_pop(&qn->bios);
458 WARN_ON_ONCE(!bio);
459
460 if (bio_list_empty(&qn->bios)) {
461 list_del_init(&qn->node);
462 if (tg_to_put)
463 *tg_to_put = qn->tg;
464 else
465 blkg_put(tg_to_blkg(qn->tg));
466 } else {
467 list_move_tail(&qn->node, queued);
468 }
469
470 return bio;
471}
472
49a2f1e3 473/* init a service_queue, assumes the caller zeroed it */
b2ce2643 474static void throtl_service_queue_init(struct throtl_service_queue *sq)
49a2f1e3 475{
c5cc2070
TH
476 INIT_LIST_HEAD(&sq->queued[0]);
477 INIT_LIST_HEAD(&sq->queued[1]);
49a2f1e3 478 sq->pending_tree = RB_ROOT;
e99e88a9 479 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
69df0ab0
TH
480}
481
001bea73
TH
482static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
483{
4fb72036 484 struct throtl_grp *tg;
24bdb8ef 485 int rw;
4fb72036
TH
486
487 tg = kzalloc_node(sizeof(*tg), gfp, node);
488 if (!tg)
77ea7338 489 return NULL;
4fb72036 490
b2ce2643
TH
491 throtl_service_queue_init(&tg->service_queue);
492
493 for (rw = READ; rw <= WRITE; rw++) {
494 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
495 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
496 }
497
498 RB_CLEAR_NODE(&tg->rb_node);
9f626e37
SL
499 tg->bps[READ][LIMIT_MAX] = U64_MAX;
500 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
501 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
502 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
cd5ab1b0
SL
503 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
504 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
505 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
506 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
507 /* LIMIT_LOW will have default value 0 */
b2ce2643 508
ec80991d 509 tg->latency_target = DFL_LATENCY_TARGET;
5b81fc3c 510 tg->latency_target_conf = DFL_LATENCY_TARGET;
b4f428ef
SL
511 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
512 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
ec80991d 513
4fb72036 514 return &tg->pd;
001bea73
TH
515}
516
a9520cd6 517static void throtl_pd_init(struct blkg_policy_data *pd)
a29a171e 518{
a9520cd6
TH
519 struct throtl_grp *tg = pd_to_tg(pd);
520 struct blkcg_gq *blkg = tg_to_blkg(tg);
77216b04 521 struct throtl_data *td = blkg->q->td;
b2ce2643 522 struct throtl_service_queue *sq = &tg->service_queue;
cd1604fa 523
9138125b 524 /*
aa6ec29b 525 * If on the default hierarchy, we switch to properly hierarchical
9138125b
TH
526 * behavior where limits on a given throtl_grp are applied to the
527 * whole subtree rather than just the group itself. e.g. If 16M
528 * read_bps limit is set on the root group, the whole system can't
529 * exceed 16M for the device.
530 *
aa6ec29b 531 * If not on the default hierarchy, the broken flat hierarchy
9138125b
TH
532 * behavior is retained where all throtl_grps are treated as if
533 * they're all separate root groups right below throtl_data.
534 * Limits of a group don't interact with limits of other groups
535 * regardless of the position of the group in the hierarchy.
536 */
b2ce2643 537 sq->parent_sq = &td->service_queue;
9e10a130 538 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
b2ce2643 539 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
77216b04 540 tg->td = td;
8a3d2615
TH
541}
542
693e751e
TH
543/*
544 * Set has_rules[] if @tg or any of its parents have limits configured.
545 * This doesn't require walking up to the top of the hierarchy as the
546 * parent's has_rules[] is guaranteed to be correct.
547 */
548static void tg_update_has_rules(struct throtl_grp *tg)
549{
550 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
9f626e37 551 struct throtl_data *td = tg->td;
693e751e
TH
552 int rw;
553
554 for (rw = READ; rw <= WRITE; rw++)
555 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
9f626e37
SL
556 (td->limit_valid[td->limit_index] &&
557 (tg_bps_limit(tg, rw) != U64_MAX ||
558 tg_iops_limit(tg, rw) != UINT_MAX));
693e751e
TH
559}
560
a9520cd6 561static void throtl_pd_online(struct blkg_policy_data *pd)
693e751e 562{
aec24246 563 struct throtl_grp *tg = pd_to_tg(pd);
693e751e
TH
564 /*
565 * We don't want new groups to escape the limits of their ancestors.
566 * Update has_rules[] after a new group is brought online.
567 */
aec24246 568 tg_update_has_rules(tg);
693e751e
TH
569}
570
cd5ab1b0
SL
571static void blk_throtl_update_limit_valid(struct throtl_data *td)
572{
573 struct cgroup_subsys_state *pos_css;
574 struct blkcg_gq *blkg;
575 bool low_valid = false;
576
577 rcu_read_lock();
578 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
579 struct throtl_grp *tg = blkg_to_tg(blkg);
580
581 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
582 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
583 low_valid = true;
584 }
585 rcu_read_unlock();
586
587 td->limit_valid[LIMIT_LOW] = low_valid;
588}
589
c79892c5 590static void throtl_upgrade_state(struct throtl_data *td);
cd5ab1b0
SL
591static void throtl_pd_offline(struct blkg_policy_data *pd)
592{
593 struct throtl_grp *tg = pd_to_tg(pd);
594
595 tg->bps[READ][LIMIT_LOW] = 0;
596 tg->bps[WRITE][LIMIT_LOW] = 0;
597 tg->iops[READ][LIMIT_LOW] = 0;
598 tg->iops[WRITE][LIMIT_LOW] = 0;
599
600 blk_throtl_update_limit_valid(tg->td);
601
c79892c5
SL
602 if (!tg->td->limit_valid[tg->td->limit_index])
603 throtl_upgrade_state(tg->td);
cd5ab1b0
SL
604}
605
001bea73
TH
606static void throtl_pd_free(struct blkg_policy_data *pd)
607{
4fb72036
TH
608 struct throtl_grp *tg = pd_to_tg(pd);
609
b2ce2643 610 del_timer_sync(&tg->service_queue.pending_timer);
4fb72036 611 kfree(tg);
001bea73
TH
612}
613
0049af73
TH
614static struct throtl_grp *
615throtl_rb_first(struct throtl_service_queue *parent_sq)
e43473b7
VG
616{
617 /* Service tree is empty */
0049af73 618 if (!parent_sq->nr_pending)
e43473b7
VG
619 return NULL;
620
0049af73
TH
621 if (!parent_sq->first_pending)
622 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
e43473b7 623
0049af73
TH
624 if (parent_sq->first_pending)
625 return rb_entry_tg(parent_sq->first_pending);
e43473b7
VG
626
627 return NULL;
628}
629
630static void rb_erase_init(struct rb_node *n, struct rb_root *root)
631{
632 rb_erase(n, root);
633 RB_CLEAR_NODE(n);
634}
635
0049af73
TH
636static void throtl_rb_erase(struct rb_node *n,
637 struct throtl_service_queue *parent_sq)
e43473b7 638{
0049af73
TH
639 if (parent_sq->first_pending == n)
640 parent_sq->first_pending = NULL;
641 rb_erase_init(n, &parent_sq->pending_tree);
642 --parent_sq->nr_pending;
e43473b7
VG
643}
644
0049af73 645static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
e43473b7
VG
646{
647 struct throtl_grp *tg;
648
0049af73 649 tg = throtl_rb_first(parent_sq);
e43473b7
VG
650 if (!tg)
651 return;
652
0049af73 653 parent_sq->first_pending_disptime = tg->disptime;
e43473b7
VG
654}
655
77216b04 656static void tg_service_queue_add(struct throtl_grp *tg)
e43473b7 657{
77216b04 658 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
0049af73 659 struct rb_node **node = &parent_sq->pending_tree.rb_node;
e43473b7
VG
660 struct rb_node *parent = NULL;
661 struct throtl_grp *__tg;
662 unsigned long key = tg->disptime;
663 int left = 1;
664
665 while (*node != NULL) {
666 parent = *node;
667 __tg = rb_entry_tg(parent);
668
669 if (time_before(key, __tg->disptime))
670 node = &parent->rb_left;
671 else {
672 node = &parent->rb_right;
673 left = 0;
674 }
675 }
676
677 if (left)
0049af73 678 parent_sq->first_pending = &tg->rb_node;
e43473b7
VG
679
680 rb_link_node(&tg->rb_node, parent, node);
0049af73 681 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
e43473b7
VG
682}
683
77216b04 684static void __throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 685{
77216b04 686 tg_service_queue_add(tg);
5b2c16aa 687 tg->flags |= THROTL_TG_PENDING;
77216b04 688 tg->service_queue.parent_sq->nr_pending++;
e43473b7
VG
689}
690
77216b04 691static void throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 692{
5b2c16aa 693 if (!(tg->flags & THROTL_TG_PENDING))
77216b04 694 __throtl_enqueue_tg(tg);
e43473b7
VG
695}
696
77216b04 697static void __throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 698{
77216b04 699 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
5b2c16aa 700 tg->flags &= ~THROTL_TG_PENDING;
e43473b7
VG
701}
702
77216b04 703static void throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 704{
5b2c16aa 705 if (tg->flags & THROTL_TG_PENDING)
77216b04 706 __throtl_dequeue_tg(tg);
e43473b7
VG
707}
708
a9131a27 709/* Call with queue lock held */
69df0ab0
TH
710static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
711 unsigned long expires)
a9131a27 712{
a41b816c 713 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
06cceedc
SL
714
715 /*
716 * Since we are adjusting the throttle limit dynamically, the sleep
717 * time calculated according to the previous limit might be invalid. It's
718 * possible the cgroup sleep time is very long and no other cgroups
719 * have IO running to notice the limit change. Make sure the cgroup
720 * doesn't sleep too long so that the change isn't missed.
721 */
722 if (time_after(expires, max_expire))
723 expires = max_expire;
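	/*
	 * e.g. with the default HD throtl_slice of HZ / 10, the timer is never
	 * pushed out more than about 0.8s (8 * throtl_slice) from now.
	 */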
69df0ab0
TH
724 mod_timer(&sq->pending_timer, expires);
725 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
726 expires - jiffies, jiffies);
a9131a27
TH
727}
728
7f52f98c
TH
729/**
730 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
731 * @sq: the service_queue to schedule dispatch for
732 * @force: force scheduling
733 *
734 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
735 * dispatch time of the first pending child. Returns %true if either timer
736 * is armed or there's no pending child left. %false if the current
737 * dispatch window is still open and the caller should continue
738 * dispatching.
739 *
740 * If @force is %true, the dispatch timer is always scheduled and this
741 * function is guaranteed to return %true. This is to be used when the
742 * caller can't dispatch itself and needs to invoke pending_timer
743 * unconditionally. Note that forced scheduling is likely to induce a short
744 * delay before dispatch starts even if @sq->first_pending_disptime is not
745 * in the future and thus shouldn't be used in hot paths.
746 */
747static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
748 bool force)
e43473b7 749{
6a525600 750 /* any pending children left? */
c9e0332e 751 if (!sq->nr_pending)
7f52f98c 752 return true;
e43473b7 753
c9e0332e 754 update_min_dispatch_time(sq);
e43473b7 755
69df0ab0 756 /* is the next dispatch time in the future? */
7f52f98c 757 if (force || time_after(sq->first_pending_disptime, jiffies)) {
69df0ab0 758 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
7f52f98c 759 return true;
69df0ab0
TH
760 }
761
7f52f98c
TH
762 /* tell the caller to continue dispatching */
763 return false;
e43473b7
VG
764}
765
32ee5bc4
VG
766static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
767 bool rw, unsigned long start)
768{
769 tg->bytes_disp[rw] = 0;
770 tg->io_disp[rw] = 0;
771
772 /*
773 * Previous slice has expired. We must have trimmed it after last
774 * bio dispatch. That means since start of last slice, we never used
775 * that bandwidth. Do try to make use of that bandwidth while giving
776 * credit.
777 */
778 if (time_after_eq(start, tg->slice_start[rw]))
779 tg->slice_start[rw] = start;
780
297e3d85 781 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
32ee5bc4
VG
782 throtl_log(&tg->service_queue,
783 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
784 rw == READ ? 'R' : 'W', tg->slice_start[rw],
785 tg->slice_end[rw], jiffies);
786}
787
0f3457f6 788static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
e43473b7
VG
789{
790 tg->bytes_disp[rw] = 0;
8e89d13f 791 tg->io_disp[rw] = 0;
e43473b7 792 tg->slice_start[rw] = jiffies;
297e3d85 793 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
fda6f272
TH
794 throtl_log(&tg->service_queue,
795 "[%c] new slice start=%lu end=%lu jiffies=%lu",
796 rw == READ ? 'R' : 'W', tg->slice_start[rw],
797 tg->slice_end[rw], jiffies);
e43473b7
VG
798}
799
0f3457f6
TH
800static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
801 unsigned long jiffy_end)
d1ae8ffd 802{
297e3d85 803 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
d1ae8ffd
VG
804}
805
0f3457f6
TH
806static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
807 unsigned long jiffy_end)
e43473b7 808{
297e3d85 809 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
fda6f272
TH
810 throtl_log(&tg->service_queue,
811 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
812 rw == READ ? 'R' : 'W', tg->slice_start[rw],
813 tg->slice_end[rw], jiffies);
e43473b7
VG
814}
815
816/* Determine if previously allocated or extended slice is complete or not */
0f3457f6 817static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
e43473b7
VG
818{
819 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
5cf8c227 820 return false;
e43473b7 821
0b6bad7d 822 return true;
e43473b7
VG
823}
824
825/* Trim the used slices and adjust slice start accordingly */
0f3457f6 826static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
e43473b7 827{
3aad5d3e
VG
828 unsigned long nr_slices, time_elapsed, io_trim;
829 u64 bytes_trim, tmp;
e43473b7
VG
830
831 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
832
833 /*
834 * If bps are unlimited (-1), then the time slice doesn't get
835 * renewed. Don't try to trim the slice if the slice has expired. A new
836 * slice will start when appropriate.
837 */
0f3457f6 838 if (throtl_slice_used(tg, rw))
e43473b7
VG
839 return;
840
d1ae8ffd
VG
841 /*
842 * A bio has been dispatched. Also adjust slice_end. It might happen
843 * that initially the cgroup limit was very low, resulting in a high
844 * slice_end, but later the limit was bumped up and the bio was dispatched
845 * sooner; then we need to reduce slice_end. A high bogus slice_end
846 * is bad because it does not allow a new slice to start.
847 */
848
297e3d85 849 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
d1ae8ffd 850
e43473b7
VG
851 time_elapsed = jiffies - tg->slice_start[rw];
852
297e3d85 853 nr_slices = time_elapsed / tg->td->throtl_slice;
e43473b7
VG
854
855 if (!nr_slices)
856 return;
297e3d85 857 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
3aad5d3e
VG
858 do_div(tmp, HZ);
859 bytes_trim = tmp;
e43473b7 860
297e3d85
SL
861 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
862 HZ;
e43473b7 863
8e89d13f 864 if (!bytes_trim && !io_trim)
e43473b7
VG
865 return;
866
867 if (tg->bytes_disp[rw] >= bytes_trim)
868 tg->bytes_disp[rw] -= bytes_trim;
869 else
870 tg->bytes_disp[rw] = 0;
871
8e89d13f
VG
872 if (tg->io_disp[rw] >= io_trim)
873 tg->io_disp[rw] -= io_trim;
874 else
875 tg->io_disp[rw] = 0;
876
297e3d85 877 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
e43473b7 878
fda6f272
TH
879 throtl_log(&tg->service_queue,
880 "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
881 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
882 tg->slice_start[rw], tg->slice_end[rw], jiffies);
e43473b7
VG
883}
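/*
 * Example of the trim above (hypothetical numbers): with a 1 MB/s bps limit,
 * throtl_slice == HZ / 10 and two whole slices elapsed, bytes_trim is about
 * 1048576 * (HZ / 10) * 2 / HZ ~= 200 KB; bytes_disp[rw] is reduced by that
 * amount (floored at 0) and slice_start[rw] advances by 2 * throtl_slice.
 */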
884
0f3457f6
TH
885static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
886 unsigned long *wait)
e43473b7
VG
887{
888 bool rw = bio_data_dir(bio);
8e89d13f 889 unsigned int io_allowed;
e43473b7 890 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
c49c06e4 891 u64 tmp;
e43473b7 892
8e89d13f 893 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
e43473b7 894
8e89d13f
VG
895 /* Slice has just started. Consider one slice interval */
896 if (!jiffy_elapsed)
297e3d85 897 jiffy_elapsed_rnd = tg->td->throtl_slice;
8e89d13f 898
297e3d85 899 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
8e89d13f 900
c49c06e4
VG
901 /*
902 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
903 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
904 * will allow dispatch after 1 second and after that slice should
905 * have been trimmed.
906 */
907
9f626e37 908 tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
c49c06e4
VG
909 do_div(tmp, HZ);
910
911 if (tmp > UINT_MAX)
912 io_allowed = UINT_MAX;
913 else
914 io_allowed = tmp;
8e89d13f
VG
915
916 if (tg->io_disp[rw] + 1 <= io_allowed) {
e43473b7
VG
917 if (wait)
918 *wait = 0;
5cf8c227 919 return true;
e43473b7
VG
920 }
921
8e89d13f 922 /* Calc approx time to dispatch */
9f626e37 923 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
8e89d13f
VG
924
925 if (jiffy_wait > jiffy_elapsed)
926 jiffy_wait = jiffy_wait - jiffy_elapsed;
927 else
928 jiffy_wait = 1;
929
930 if (wait)
931 *wait = jiffy_wait;
0b6bad7d 932 return false;
8e89d13f
VG
933}
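/*
 * Example (hypothetical): with an 8 iops limit and a freshly started slice,
 * jiffy_elapsed_rnd is rounded up to one throtl_slice (HZ / 10), so
 * io_allowed = 8 * (HZ / 10) / HZ = 0 and the first bio has to wait roughly
 * (1 * HZ) / 8 + 1 jiffies before tg_may_dispatch() lets it through.
 */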
934
0f3457f6
TH
935static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
936 unsigned long *wait)
8e89d13f
VG
937{
938 bool rw = bio_data_dir(bio);
3aad5d3e 939 u64 bytes_allowed, extra_bytes, tmp;
8e89d13f 940 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
ea0ea2bc 941 unsigned int bio_size = throtl_bio_data_size(bio);
e43473b7
VG
942
943 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
944
945 /* Slice has just started. Consider one slice interval */
946 if (!jiffy_elapsed)
297e3d85 947 jiffy_elapsed_rnd = tg->td->throtl_slice;
e43473b7 948
297e3d85 949 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
e43473b7 950
9f626e37 951 tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
5e901a2b 952 do_div(tmp, HZ);
3aad5d3e 953 bytes_allowed = tmp;
e43473b7 954
ea0ea2bc 955 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
e43473b7
VG
956 if (wait)
957 *wait = 0;
5cf8c227 958 return true;
e43473b7
VG
959 }
960
961 /* Calc approx time to dispatch */
ea0ea2bc 962 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
9f626e37 963 jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
e43473b7
VG
964
965 if (!jiffy_wait)
966 jiffy_wait = 1;
967
968 /*
969 * This wait time is without taking into consideration the rounding
970 * up we did. Add that time also.
971 */
972 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
e43473b7
VG
973 if (wait)
974 *wait = jiffy_wait;
0b6bad7d 975 return false;
8e89d13f
VG
976}
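/*
 * Example (hypothetical): with a 1 MB/s limit and the elapsed time rounded
 * up to one throtl_slice (HZ / 10), bytes_allowed is about 100 KB.  A queued
 * 256 KB bio exceeds that, so it waits roughly extra_bytes * HZ / limit,
 * i.e. about 0.15 s here, plus the round-up adjustment added above.
 */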
977
978/*
979 * Returns whether one can dispatch a bio or not. Also returns approx number
980 * of jiffies to wait before this bio is within the IO rate and can be dispatched
981 */
0f3457f6
TH
982static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
983 unsigned long *wait)
8e89d13f
VG
984{
985 bool rw = bio_data_dir(bio);
986 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
987
988 /*
989 * Currently whole state machine of group depends on first bio
990 * queued in the group bio list. So one should not be calling
991 * this function with a different bio if there are other bios
992 * queued.
993 */
73f0d49a 994 BUG_ON(tg->service_queue.nr_queued[rw] &&
c5cc2070 995 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
e43473b7 996
8e89d13f 997 /* If tg->bps = -1, then BW is unlimited */
9f626e37
SL
998 if (tg_bps_limit(tg, rw) == U64_MAX &&
999 tg_iops_limit(tg, rw) == UINT_MAX) {
8e89d13f
VG
1000 if (wait)
1001 *wait = 0;
5cf8c227 1002 return true;
8e89d13f
VG
1003 }
1004
1005 /*
1006 * If previous slice expired, start a new one otherwise renew/extend
1007 * existing slice to make sure it is at least throtl_slice interval
164c80ed
VG
1008 * long since now. New slice is started only for empty throttle group.
1009 * If there is queued bio, that means there should be an active
1010 * slice and it should be extended instead.
8e89d13f 1011 */
164c80ed 1012 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
0f3457f6 1013 throtl_start_new_slice(tg, rw);
8e89d13f 1014 else {
297e3d85
SL
1015 if (time_before(tg->slice_end[rw],
1016 jiffies + tg->td->throtl_slice))
1017 throtl_extend_slice(tg, rw,
1018 jiffies + tg->td->throtl_slice);
8e89d13f
VG
1019 }
1020
0f3457f6
TH
1021 if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1022 tg_with_in_iops_limit(tg, bio, &iops_wait)) {
8e89d13f
VG
1023 if (wait)
1024 *wait = 0;
0b6bad7d 1025 return true;
8e89d13f
VG
1026 }
1027
1028 max_wait = max(bps_wait, iops_wait);
1029
1030 if (wait)
1031 *wait = max_wait;
1032
1033 if (time_before(tg->slice_end[rw], jiffies + max_wait))
0f3457f6 1034 throtl_extend_slice(tg, rw, jiffies + max_wait);
e43473b7 1035
0b6bad7d 1036 return false;
e43473b7
VG
1037}
1038
1039static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1040{
1041 bool rw = bio_data_dir(bio);
ea0ea2bc 1042 unsigned int bio_size = throtl_bio_data_size(bio);
e43473b7
VG
1043
1044 /* Charge the bio to the group */
ea0ea2bc 1045 tg->bytes_disp[rw] += bio_size;
8e89d13f 1046 tg->io_disp[rw]++;
ea0ea2bc 1047 tg->last_bytes_disp[rw] += bio_size;
3f0abd80 1048 tg->last_io_disp[rw]++;
e43473b7 1049
2a0f61e6 1050 /*
8d2bbd4c 1051 * BIO_THROTTLED is used to prevent the same bio from being throttled
2a0f61e6
TH
1052 * more than once as a throttled bio will go through blk-throtl the
1053 * second time when it eventually gets issued. Set it when a bio
1054 * is being charged to a tg.
2a0f61e6 1055 */
8d2bbd4c
CH
1056 if (!bio_flagged(bio, BIO_THROTTLED))
1057 bio_set_flag(bio, BIO_THROTTLED);
e43473b7
VG
1058}
1059
c5cc2070
TH
1060/**
1061 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1062 * @bio: bio to add
1063 * @qn: qnode to use
1064 * @tg: the target throtl_grp
1065 *
1066 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
1067 * tg->qnode_on_self[] is used.
1068 */
1069static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1070 struct throtl_grp *tg)
e43473b7 1071{
73f0d49a 1072 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1073 bool rw = bio_data_dir(bio);
1074
c5cc2070
TH
1075 if (!qn)
1076 qn = &tg->qnode_on_self[rw];
1077
0e9f4164
TH
1078 /*
1079 * If @tg doesn't currently have any bios queued in the same
1080 * direction, queueing @bio can change when @tg should be
1081 * dispatched. Mark that @tg was empty. This is automatically
1082 * cleared on the next tg_update_disptime().
1083 */
1084 if (!sq->nr_queued[rw])
1085 tg->flags |= THROTL_TG_WAS_EMPTY;
1086
c5cc2070
TH
1087 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1088
73f0d49a 1089 sq->nr_queued[rw]++;
77216b04 1090 throtl_enqueue_tg(tg);
e43473b7
VG
1091}
1092
77216b04 1093static void tg_update_disptime(struct throtl_grp *tg)
e43473b7 1094{
73f0d49a 1095 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1096 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1097 struct bio *bio;
1098
d609af3a
ME
1099 bio = throtl_peek_queued(&sq->queued[READ]);
1100 if (bio)
0f3457f6 1101 tg_may_dispatch(tg, bio, &read_wait);
e43473b7 1102
d609af3a
ME
1103 bio = throtl_peek_queued(&sq->queued[WRITE]);
1104 if (bio)
0f3457f6 1105 tg_may_dispatch(tg, bio, &write_wait);
e43473b7
VG
1106
1107 min_wait = min(read_wait, write_wait);
1108 disptime = jiffies + min_wait;
1109
e43473b7 1110 /* Update dispatch time */
77216b04 1111 throtl_dequeue_tg(tg);
e43473b7 1112 tg->disptime = disptime;
77216b04 1113 throtl_enqueue_tg(tg);
0e9f4164
TH
1114
1115 /* see throtl_add_bio_tg() */
1116 tg->flags &= ~THROTL_TG_WAS_EMPTY;
e43473b7
VG
1117}
1118
32ee5bc4
VG
1119static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1120 struct throtl_grp *parent_tg, bool rw)
1121{
1122 if (throtl_slice_used(parent_tg, rw)) {
1123 throtl_start_new_slice_with_credit(parent_tg, rw,
1124 child_tg->slice_start[rw]);
1125 }
1126
1127}
1128
77216b04 1129static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
e43473b7 1130{
73f0d49a 1131 struct throtl_service_queue *sq = &tg->service_queue;
6bc9c2b4
TH
1132 struct throtl_service_queue *parent_sq = sq->parent_sq;
1133 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
c5cc2070 1134 struct throtl_grp *tg_to_put = NULL;
e43473b7
VG
1135 struct bio *bio;
1136
c5cc2070
TH
1137 /*
1138 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1139 * from @tg may put its reference and @parent_sq might end up
1140 * getting released prematurely. Remember the tg to put and put it
1141 * after @bio is transferred to @parent_sq.
1142 */
1143 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
73f0d49a 1144 sq->nr_queued[rw]--;
e43473b7
VG
1145
1146 throtl_charge_bio(tg, bio);
6bc9c2b4
TH
1147
1148 /*
1149 * If our parent is another tg, we just need to transfer @bio to
1150 * the parent using throtl_add_bio_tg(). If our parent is
1151 * @td->service_queue, @bio is ready to be issued. Put it on its
1152 * bio_lists[] and decrease total number queued. The caller is
1153 * responsible for issuing these bios.
1154 */
1155 if (parent_tg) {
c5cc2070 1156 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
32ee5bc4 1157 start_parent_slice_with_credit(tg, parent_tg, rw);
6bc9c2b4 1158 } else {
c5cc2070
TH
1159 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1160 &parent_sq->queued[rw]);
6bc9c2b4
TH
1161 BUG_ON(tg->td->nr_queued[rw] <= 0);
1162 tg->td->nr_queued[rw]--;
1163 }
e43473b7 1164
0f3457f6 1165 throtl_trim_slice(tg, rw);
6bc9c2b4 1166
c5cc2070
TH
1167 if (tg_to_put)
1168 blkg_put(tg_to_blkg(tg_to_put));
e43473b7
VG
1169}
1170
77216b04 1171static int throtl_dispatch_tg(struct throtl_grp *tg)
e43473b7 1172{
73f0d49a 1173 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1174 unsigned int nr_reads = 0, nr_writes = 0;
1175 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
c2f6805d 1176 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
e43473b7
VG
1177 struct bio *bio;
1178
1179 /* Try to dispatch 75% READS and 25% WRITES */
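	/*
	 * With the default throtl_grp_quantum of 8 this means at most 6 reads
	 * and 2 writes per invocation.
	 */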
1180
c5cc2070 1181 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
0f3457f6 1182 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1183
77216b04 1184 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1185 nr_reads++;
1186
1187 if (nr_reads >= max_nr_reads)
1188 break;
1189 }
1190
c5cc2070 1191 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
0f3457f6 1192 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1193
77216b04 1194 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1195 nr_writes++;
1196
1197 if (nr_writes >= max_nr_writes)
1198 break;
1199 }
1200
1201 return nr_reads + nr_writes;
1202}
1203
651930bc 1204static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
e43473b7
VG
1205{
1206 unsigned int nr_disp = 0;
e43473b7
VG
1207
1208 while (1) {
73f0d49a 1209 struct throtl_grp *tg = throtl_rb_first(parent_sq);
2ab74cd2 1210 struct throtl_service_queue *sq;
e43473b7
VG
1211
1212 if (!tg)
1213 break;
1214
1215 if (time_before(jiffies, tg->disptime))
1216 break;
1217
77216b04 1218 throtl_dequeue_tg(tg);
e43473b7 1219
77216b04 1220 nr_disp += throtl_dispatch_tg(tg);
e43473b7 1221
2ab74cd2 1222 sq = &tg->service_queue;
73f0d49a 1223 if (sq->nr_queued[0] || sq->nr_queued[1])
77216b04 1224 tg_update_disptime(tg);
e43473b7
VG
1225
1226 if (nr_disp >= throtl_quantum)
1227 break;
1228 }
1229
1230 return nr_disp;
1231}
1232
c79892c5
SL
1233static bool throtl_can_upgrade(struct throtl_data *td,
1234 struct throtl_grp *this_tg);
6e1a5704
TH
1235/**
1236 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1237 * @t: the pending_timer of the throtl_service_queue being serviced
1238 *
1239 * This timer is armed when a child throtl_grp with active bios becomes
1240 * pending and queued on the service_queue's pending_tree and expires when
1241 * the first child throtl_grp should be dispatched. This function
2e48a530
TH
1242 * dispatches bios from the children throtl_grps to the parent
1243 * service_queue.
1244 *
1245 * If the parent's parent is another throtl_grp, dispatching is propagated
1246 * by either arming its pending_timer or repeating dispatch directly. If
1247 * the top-level service_tree is reached, throtl_data->dispatch_work is
1248 * kicked so that the ready bios are issued.
6e1a5704 1249 */
e99e88a9 1250static void throtl_pending_timer_fn(struct timer_list *t)
69df0ab0 1251{
e99e88a9 1252 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
2e48a530 1253 struct throtl_grp *tg = sq_to_tg(sq);
69df0ab0 1254 struct throtl_data *td = sq_to_td(sq);
cb76199c 1255 struct request_queue *q = td->queue;
2e48a530
TH
1256 struct throtl_service_queue *parent_sq;
1257 bool dispatched;
6e1a5704 1258 int ret;
e43473b7
VG
1259
1260 spin_lock_irq(q->queue_lock);
c79892c5
SL
1261 if (throtl_can_upgrade(td, NULL))
1262 throtl_upgrade_state(td);
1263
2e48a530
TH
1264again:
1265 parent_sq = sq->parent_sq;
1266 dispatched = false;
e43473b7 1267
7f52f98c
TH
1268 while (true) {
1269 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
2e48a530
TH
1270 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1271 sq->nr_queued[READ], sq->nr_queued[WRITE]);
7f52f98c
TH
1272
1273 ret = throtl_select_dispatch(sq);
1274 if (ret) {
7f52f98c
TH
1275 throtl_log(sq, "bios disp=%u", ret);
1276 dispatched = true;
1277 }
e43473b7 1278
7f52f98c
TH
1279 if (throtl_schedule_next_dispatch(sq, false))
1280 break;
e43473b7 1281
7f52f98c
TH
1282 /* this dispatch window is still open, relax and repeat */
1283 spin_unlock_irq(q->queue_lock);
1284 cpu_relax();
1285 spin_lock_irq(q->queue_lock);
651930bc 1286 }
e43473b7 1287
2e48a530
TH
1288 if (!dispatched)
1289 goto out_unlock;
6e1a5704 1290
2e48a530
TH
1291 if (parent_sq) {
1292 /* @parent_sq is another throtl_grp, propagate dispatch */
1293 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1294 tg_update_disptime(tg);
1295 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1296 /* window is already open, repeat dispatching */
1297 sq = parent_sq;
1298 tg = sq_to_tg(sq);
1299 goto again;
1300 }
1301 }
1302 } else {
1303 /* reached the top-level, queue issuing */
1304 queue_work(kthrotld_workqueue, &td->dispatch_work);
1305 }
1306out_unlock:
e43473b7 1307 spin_unlock_irq(q->queue_lock);
6e1a5704 1308}
e43473b7 1309
6e1a5704
TH
1310/**
1311 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1312 * @work: work item being executed
1313 *
1314 * This function is queued for execution when bios reach the bio_lists[]
1315 * of throtl_data->service_queue. Those bios are ready and issued by this
1316 * function.
1317 */
8876e140 1318static void blk_throtl_dispatch_work_fn(struct work_struct *work)
6e1a5704
TH
1319{
1320 struct throtl_data *td = container_of(work, struct throtl_data,
1321 dispatch_work);
1322 struct throtl_service_queue *td_sq = &td->service_queue;
1323 struct request_queue *q = td->queue;
1324 struct bio_list bio_list_on_stack;
1325 struct bio *bio;
1326 struct blk_plug plug;
1327 int rw;
1328
1329 bio_list_init(&bio_list_on_stack);
1330
1331 spin_lock_irq(q->queue_lock);
c5cc2070
TH
1332 for (rw = READ; rw <= WRITE; rw++)
1333 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1334 bio_list_add(&bio_list_on_stack, bio);
6e1a5704
TH
1335 spin_unlock_irq(q->queue_lock);
1336
1337 if (!bio_list_empty(&bio_list_on_stack)) {
69d60eb9 1338 blk_start_plug(&plug);
e43473b7
VG
1339 while((bio = bio_list_pop(&bio_list_on_stack)))
1340 generic_make_request(bio);
69d60eb9 1341 blk_finish_plug(&plug);
e43473b7 1342 }
e43473b7
VG
1343}
1344
f95a04af
TH
1345static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1346 int off)
60c2bc2d 1347{
f95a04af
TH
1348 struct throtl_grp *tg = pd_to_tg(pd);
1349 u64 v = *(u64 *)((void *)tg + off);
60c2bc2d 1350
2ab5492d 1351 if (v == U64_MAX)
60c2bc2d 1352 return 0;
f95a04af 1353 return __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1354}
1355
f95a04af
TH
1356static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1357 int off)
e43473b7 1358{
f95a04af
TH
1359 struct throtl_grp *tg = pd_to_tg(pd);
1360 unsigned int v = *(unsigned int *)((void *)tg + off);
fe071437 1361
2ab5492d 1362 if (v == UINT_MAX)
af133ceb 1363 return 0;
f95a04af 1364 return __blkg_prfill_u64(sf, pd, v);
e43473b7
VG
1365}
1366
2da8ca82 1367static int tg_print_conf_u64(struct seq_file *sf, void *v)
8e89d13f 1368{
2da8ca82
TH
1369 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1370 &blkcg_policy_throtl, seq_cft(sf)->private, false);
af133ceb 1371 return 0;
8e89d13f
VG
1372}
1373
2da8ca82 1374static int tg_print_conf_uint(struct seq_file *sf, void *v)
8e89d13f 1375{
2da8ca82
TH
1376 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1377 &blkcg_policy_throtl, seq_cft(sf)->private, false);
af133ceb 1378 return 0;
60c2bc2d
TH
1379}
1380
9bb67aeb 1381static void tg_conf_updated(struct throtl_grp *tg, bool global)
60c2bc2d 1382{
69948b07 1383 struct throtl_service_queue *sq = &tg->service_queue;
492eb21b 1384 struct cgroup_subsys_state *pos_css;
69948b07 1385 struct blkcg_gq *blkg;
af133ceb 1386
fda6f272
TH
1387 throtl_log(&tg->service_queue,
1388 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
9f626e37
SL
1389 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1390 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
632b4493 1391
693e751e
TH
1392 /*
1393 * Update has_rules[] flags for the updated tg's subtree. A tg is
1394 * considered to have rules if either the tg itself or any of its
1395 * ancestors has rules. This identifies groups without any
1396 * restrictions in the whole hierarchy and allows them to bypass
1397 * blk-throttle.
1398 */
9bb67aeb
SL
1399 blkg_for_each_descendant_pre(blkg, pos_css,
1400 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
5b81fc3c
SL
1401 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1402 struct throtl_grp *parent_tg;
1403
1404 tg_update_has_rules(this_tg);
1405 /* ignore root/second level */
1406 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1407 !blkg->parent->parent)
1408 continue;
1409 parent_tg = blkg_to_tg(blkg->parent);
1410 /*
1411 * make sure all children have a lower idle time threshold and a
1412 * higher latency target
1413 */
1414 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1415 parent_tg->idletime_threshold);
1416 this_tg->latency_target = max(this_tg->latency_target,
1417 parent_tg->latency_target);
1418 }
693e751e 1419
632b4493
TH
1420 /*
1421 * We're already holding queue_lock and know @tg is valid. Let's
1422 * apply the new config directly.
1423 *
1424 * Restart the slices for both READ and WRITE. It might happen
1425 * that a group's limits are dropped suddenly and we don't want to
1426 * account recently dispatched IO against the new low rate.
1427 */
0f3457f6
TH
1428 throtl_start_new_slice(tg, 0);
1429 throtl_start_new_slice(tg, 1);
632b4493 1430
5b2c16aa 1431 if (tg->flags & THROTL_TG_PENDING) {
77216b04 1432 tg_update_disptime(tg);
7f52f98c 1433 throtl_schedule_next_dispatch(sq->parent_sq, true);
632b4493 1434 }
69948b07
TH
1435}
1436
1437static ssize_t tg_set_conf(struct kernfs_open_file *of,
1438 char *buf, size_t nbytes, loff_t off, bool is_u64)
1439{
1440 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1441 struct blkg_conf_ctx ctx;
1442 struct throtl_grp *tg;
1443 int ret;
1444 u64 v;
1445
1446 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1447 if (ret)
1448 return ret;
1449
1450 ret = -EINVAL;
1451 if (sscanf(ctx.body, "%llu", &v) != 1)
1452 goto out_finish;
1453 if (!v)
2ab5492d 1454 v = U64_MAX;
69948b07
TH
1455
1456 tg = blkg_to_tg(ctx.blkg);
1457
1458 if (is_u64)
1459 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1460 else
1461 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
60c2bc2d 1462
9bb67aeb 1463 tg_conf_updated(tg, false);
36aa9e5f
TH
1464 ret = 0;
1465out_finish:
60c2bc2d 1466 blkg_conf_finish(&ctx);
36aa9e5f 1467 return ret ?: nbytes;
8e89d13f
VG
1468}
1469
451af504
TH
1470static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1471 char *buf, size_t nbytes, loff_t off)
60c2bc2d 1472{
451af504 1473 return tg_set_conf(of, buf, nbytes, off, true);
60c2bc2d
TH
1474}
1475
451af504
TH
1476static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1477 char *buf, size_t nbytes, loff_t off)
60c2bc2d 1478{
451af504 1479 return tg_set_conf(of, buf, nbytes, off, false);
60c2bc2d
TH
1480}
1481
880f50e2 1482static struct cftype throtl_legacy_files[] = {
60c2bc2d
TH
1483 {
1484 .name = "throttle.read_bps_device",
9f626e37 1485 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
2da8ca82 1486 .seq_show = tg_print_conf_u64,
451af504 1487 .write = tg_set_conf_u64,
60c2bc2d
TH
1488 },
1489 {
1490 .name = "throttle.write_bps_device",
9f626e37 1491 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
2da8ca82 1492 .seq_show = tg_print_conf_u64,
451af504 1493 .write = tg_set_conf_u64,
60c2bc2d
TH
1494 },
1495 {
1496 .name = "throttle.read_iops_device",
9f626e37 1497 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
2da8ca82 1498 .seq_show = tg_print_conf_uint,
451af504 1499 .write = tg_set_conf_uint,
60c2bc2d
TH
1500 },
1501 {
1502 .name = "throttle.write_iops_device",
9f626e37 1503 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
2da8ca82 1504 .seq_show = tg_print_conf_uint,
451af504 1505 .write = tg_set_conf_uint,
60c2bc2d
TH
1506 },
1507 {
1508 .name = "throttle.io_service_bytes",
77ea7338
TH
1509 .private = (unsigned long)&blkcg_policy_throtl,
1510 .seq_show = blkg_print_stat_bytes,
60c2bc2d 1511 },
17534c6f 1512 {
1513 .name = "throttle.io_service_bytes_recursive",
1514 .private = (unsigned long)&blkcg_policy_throtl,
1515 .seq_show = blkg_print_stat_bytes_recursive,
1516 },
60c2bc2d
TH
1517 {
1518 .name = "throttle.io_serviced",
77ea7338
TH
1519 .private = (unsigned long)&blkcg_policy_throtl,
1520 .seq_show = blkg_print_stat_ios,
60c2bc2d 1521 },
17534c6f 1522 {
1523 .name = "throttle.io_serviced_recursive",
1524 .private = (unsigned long)&blkcg_policy_throtl,
1525 .seq_show = blkg_print_stat_ios_recursive,
1526 },
60c2bc2d
TH
1527 { } /* terminate */
1528};
1529
cd5ab1b0 1530static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
2ee867dc
TH
1531 int off)
1532{
1533 struct throtl_grp *tg = pd_to_tg(pd);
1534 const char *dname = blkg_dev_name(pd->blkg);
1535 char bufs[4][21] = { "max", "max", "max", "max" };
cd5ab1b0
SL
1536 u64 bps_dft;
1537 unsigned int iops_dft;
ada75b6e 1538 char idle_time[26] = "";
ec80991d 1539 char latency_time[26] = "";
2ee867dc
TH
1540
1541 if (!dname)
1542 return 0;
9f626e37 1543
cd5ab1b0
SL
1544 if (off == LIMIT_LOW) {
1545 bps_dft = 0;
1546 iops_dft = 0;
1547 } else {
1548 bps_dft = U64_MAX;
1549 iops_dft = UINT_MAX;
1550 }
1551
1552 if (tg->bps_conf[READ][off] == bps_dft &&
1553 tg->bps_conf[WRITE][off] == bps_dft &&
1554 tg->iops_conf[READ][off] == iops_dft &&
ada75b6e 1555 tg->iops_conf[WRITE][off] == iops_dft &&
ec80991d 1556 (off != LIMIT_LOW ||
b4f428ef 1557 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
5b81fc3c 1558 tg->latency_target_conf == DFL_LATENCY_TARGET)))
2ee867dc
TH
1559 return 0;
1560
9bb67aeb 1561 if (tg->bps_conf[READ][off] != U64_MAX)
9f626e37 1562 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
cd5ab1b0 1563 tg->bps_conf[READ][off]);
9bb67aeb 1564 if (tg->bps_conf[WRITE][off] != U64_MAX)
9f626e37 1565 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
cd5ab1b0 1566 tg->bps_conf[WRITE][off]);
9bb67aeb 1567 if (tg->iops_conf[READ][off] != UINT_MAX)
9f626e37 1568 snprintf(bufs[2], sizeof(bufs[2]), "%u",
cd5ab1b0 1569 tg->iops_conf[READ][off]);
9bb67aeb 1570 if (tg->iops_conf[WRITE][off] != UINT_MAX)
9f626e37 1571 snprintf(bufs[3], sizeof(bufs[3]), "%u",
cd5ab1b0 1572 tg->iops_conf[WRITE][off]);
ada75b6e 1573 if (off == LIMIT_LOW) {
5b81fc3c 1574 if (tg->idletime_threshold_conf == ULONG_MAX)
ada75b6e
SL
1575 strcpy(idle_time, " idle=max");
1576 else
1577 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
5b81fc3c 1578 tg->idletime_threshold_conf);
ec80991d 1579
5b81fc3c 1580 if (tg->latency_target_conf == ULONG_MAX)
ec80991d
SL
1581 strcpy(latency_time, " latency=max");
1582 else
1583 snprintf(latency_time, sizeof(latency_time),
5b81fc3c 1584 " latency=%lu", tg->latency_target_conf);
ada75b6e 1585 }
2ee867dc 1586
ec80991d
SL
1587 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1588 dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1589 latency_time);
2ee867dc
TH
1590 return 0;
1591}
1592
cd5ab1b0 1593static int tg_print_limit(struct seq_file *sf, void *v)
2ee867dc 1594{
cd5ab1b0 1595 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
2ee867dc
TH
1596 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1597 return 0;
1598}
1599
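/*
 * Usage sketch for the limit files below (hypothetical device numbers;
 * assumes the cgroup v2 io.max / io.low interface backed by tg_set_limit()):
 *
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 *   echo "8:16 rbps=1048576 idle=1000 latency=100" > io.low
 *
 * Each "key=value" token after the device number updates one limit and the
 * value "max" means unlimited.
 */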
cd5ab1b0 1600static ssize_t tg_set_limit(struct kernfs_open_file *of,
2ee867dc
TH
1601 char *buf, size_t nbytes, loff_t off)
1602{
1603 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1604 struct blkg_conf_ctx ctx;
1605 struct throtl_grp *tg;
1606 u64 v[4];
ada75b6e 1607 unsigned long idle_time;
ec80991d 1608 unsigned long latency_time;
2ee867dc 1609 int ret;
cd5ab1b0 1610 int index = of_cft(of)->private;
2ee867dc
TH
1611
1612 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1613 if (ret)
1614 return ret;
1615
1616 tg = blkg_to_tg(ctx.blkg);
1617
cd5ab1b0
SL
1618 v[0] = tg->bps_conf[READ][index];
1619 v[1] = tg->bps_conf[WRITE][index];
1620 v[2] = tg->iops_conf[READ][index];
1621 v[3] = tg->iops_conf[WRITE][index];
2ee867dc 1622
5b81fc3c
SL
1623 idle_time = tg->idletime_threshold_conf;
1624 latency_time = tg->latency_target_conf;
2ee867dc
TH
1625 while (true) {
1626 char tok[27]; /* wiops=18446744073709551616 */
1627 char *p;
2ab5492d 1628 u64 val = U64_MAX;
2ee867dc
TH
1629 int len;
1630
1631 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1632 break;
1633 if (tok[0] == '\0')
1634 break;
1635 ctx.body += len;
1636
1637 ret = -EINVAL;
1638 p = tok;
1639 strsep(&p, "=");
1640 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1641 goto out_finish;
1642
1643 ret = -ERANGE;
1644 if (!val)
1645 goto out_finish;
1646
1647 ret = -EINVAL;
1648 if (!strcmp(tok, "rbps"))
1649 v[0] = val;
1650 else if (!strcmp(tok, "wbps"))
1651 v[1] = val;
1652 else if (!strcmp(tok, "riops"))
1653 v[2] = min_t(u64, val, UINT_MAX);
1654 else if (!strcmp(tok, "wiops"))
1655 v[3] = min_t(u64, val, UINT_MAX);
ada75b6e
SL
1656 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1657 idle_time = val;
ec80991d
SL
1658 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1659 latency_time = val;
2ee867dc
TH
1660 else
1661 goto out_finish;
1662 }
1663
cd5ab1b0
SL
1664 tg->bps_conf[READ][index] = v[0];
1665 tg->bps_conf[WRITE][index] = v[1];
1666 tg->iops_conf[READ][index] = v[2];
1667 tg->iops_conf[WRITE][index] = v[3];
2ee867dc 1668
cd5ab1b0
SL
1669 if (index == LIMIT_MAX) {
1670 tg->bps[READ][index] = v[0];
1671 tg->bps[WRITE][index] = v[1];
1672 tg->iops[READ][index] = v[2];
1673 tg->iops[WRITE][index] = v[3];
1674 }
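	/* the effective low limits are clamped so they never exceed the max limits */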
1675 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1676 tg->bps_conf[READ][LIMIT_MAX]);
1677 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1678 tg->bps_conf[WRITE][LIMIT_MAX]);
1679 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1680 tg->iops_conf[READ][LIMIT_MAX]);
1681 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1682 tg->iops_conf[WRITE][LIMIT_MAX]);
b4f428ef
SL
1683 tg->idletime_threshold_conf = idle_time;
1684 tg->latency_target_conf = latency_time;
1685
1686 /* force user to configure all settings for low limit */
1687 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1688 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1689 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1690 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1691 tg->bps[READ][LIMIT_LOW] = 0;
1692 tg->bps[WRITE][LIMIT_LOW] = 0;
1693 tg->iops[READ][LIMIT_LOW] = 0;
1694 tg->iops[WRITE][LIMIT_LOW] = 0;
1695 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1696 tg->latency_target = DFL_LATENCY_TARGET;
1697 } else if (index == LIMIT_LOW) {
5b81fc3c 1698 tg->idletime_threshold = tg->idletime_threshold_conf;
5b81fc3c 1699 tg->latency_target = tg->latency_target_conf;
cd5ab1b0 1700 }
b4f428ef
SL
1701
1702 blk_throtl_update_limit_valid(tg->td);
1703 if (tg->td->limit_valid[LIMIT_LOW]) {
1704 if (index == LIMIT_LOW)
1705 tg->td->limit_index = LIMIT_LOW;
1706 } else
1707 tg->td->limit_index = LIMIT_MAX;
9bb67aeb
SL
1708 tg_conf_updated(tg, index == LIMIT_LOW &&
1709 tg->td->limit_valid[LIMIT_LOW]);
2ee867dc
TH
1710 ret = 0;
1711out_finish:
1712 blkg_conf_finish(&ctx);
1713 return ret ?: nbytes;
1714}
1715
1716static struct cftype throtl_files[] = {
cd5ab1b0
SL
1717#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1718 {
1719 .name = "low",
1720 .flags = CFTYPE_NOT_ON_ROOT,
1721 .seq_show = tg_print_limit,
1722 .write = tg_set_limit,
1723 .private = LIMIT_LOW,
1724 },
1725#endif
2ee867dc
TH
1726 {
1727 .name = "max",
1728 .flags = CFTYPE_NOT_ON_ROOT,
cd5ab1b0
SL
1729 .seq_show = tg_print_limit,
1730 .write = tg_set_limit,
1731 .private = LIMIT_MAX,
2ee867dc
TH
1732 },
1733 { } /* terminate */
1734};
1735
da527770 1736static void throtl_shutdown_wq(struct request_queue *q)
e43473b7
VG
1737{
1738 struct throtl_data *td = q->td;
1739
69df0ab0 1740 cancel_work_sync(&td->dispatch_work);
e43473b7
VG
1741}
1742
3c798398 1743static struct blkcg_policy blkcg_policy_throtl = {
2ee867dc 1744 .dfl_cftypes = throtl_files,
880f50e2 1745 .legacy_cftypes = throtl_legacy_files,
f9fcc2d3 1746
001bea73 1747 .pd_alloc_fn = throtl_pd_alloc,
f9fcc2d3 1748 .pd_init_fn = throtl_pd_init,
693e751e 1749 .pd_online_fn = throtl_pd_online,
cd5ab1b0 1750 .pd_offline_fn = throtl_pd_offline,
001bea73 1751 .pd_free_fn = throtl_pd_free,
e43473b7
VG
1752};
1753
3f0abd80
SL
1754static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1755{
1756 unsigned long rtime = jiffies, wtime = jiffies;
1757
1758 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1759 rtime = tg->last_low_overflow_time[READ];
1760 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1761 wtime = tg->last_low_overflow_time[WRITE];
1762 return min(rtime, wtime);
1763}
1764
1765/* tg should not be an intermediate node */
1766static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1767{
1768 struct throtl_service_queue *parent_sq;
1769 struct throtl_grp *parent = tg;
1770 unsigned long ret = __tg_last_low_overflow_time(tg);
1771
1772 while (true) {
1773 parent_sq = parent->service_queue.parent_sq;
1774 parent = sq_to_tg(parent_sq);
1775 if (!parent)
1776 break;
1777
1778 /*
 1779		 * The parent doesn't have a low limit, so it always reaches its
 1780		 * low limit. Its overflow time is useless for its children.
1781 */
1782 if (!parent->bps[READ][LIMIT_LOW] &&
1783 !parent->iops[READ][LIMIT_LOW] &&
1784 !parent->bps[WRITE][LIMIT_LOW] &&
1785 !parent->iops[WRITE][LIMIT_LOW])
1786 continue;
1787 if (time_after(__tg_last_low_overflow_time(parent), ret))
1788 ret = __tg_last_low_overflow_time(parent);
1789 }
1790 return ret;
1791}
1792
9e234eea
SL
1793static bool throtl_tg_is_idle(struct throtl_grp *tg)
1794{
1795 /*
 1796	 * A cgroup is considered idle if:
 1797	 * - its current idle period is longer than 4 times the idletime
b4f428ef 1798	 *   threshold (capped at a fixed maximum, in case the user sets a huge threshold)
9e234eea 1799	 * - its average think time is above the idletime threshold
53696b8d 1800	 * - its IO latency is mostly below the latency target
9e234eea 1801 */
b4f428ef 1802 unsigned long time;
4cff729f 1803 bool ret;
9e234eea 1804
b4f428ef
SL
1805 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1806 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1807 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1808 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1809 tg->avg_idletime > tg->idletime_threshold ||
1810 (tg->latency_target && tg->bio_cnt &&
53696b8d 1811 tg->bad_bio_cnt * 5 < tg->bio_cnt);
4cff729f
SL
1812 throtl_log(&tg->service_queue,
1813 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1814 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1815 tg->bio_cnt, ret, tg->td->scale);
1816 return ret;
9e234eea
SL
1817}
1818
c79892c5
SL
1819static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1820{
1821 struct throtl_service_queue *sq = &tg->service_queue;
1822 bool read_limit, write_limit;
1823
1824 /*
 1825	 * If the cgroup has reached its low limit (a low limit of 0 is always
 1826	 * considered reached), it's ok to upgrade to the next limit.
1827 */
1828 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1829 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1830 if (!read_limit && !write_limit)
1831 return true;
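	/* otherwise, every direction that has a low limit must have bios queued */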
1832 if (read_limit && sq->nr_queued[READ] &&
1833 (!write_limit || sq->nr_queued[WRITE]))
1834 return true;
1835 if (write_limit && sq->nr_queued[WRITE] &&
1836 (!read_limit || sq->nr_queued[READ]))
1837 return true;
aec24246
SL
1838
1839 if (time_after_eq(jiffies,
fa6fb5aa
SL
1840 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1841 throtl_tg_is_idle(tg))
aec24246 1842 return true;
c79892c5
SL
1843 return false;
1844}
1845
1846static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1847{
1848 while (true) {
1849 if (throtl_tg_can_upgrade(tg))
1850 return true;
1851 tg = sq_to_tg(tg->service_queue.parent_sq);
1852 if (!tg || !tg_to_blkg(tg)->parent)
1853 return false;
1854 }
1855 return false;
1856}
1857
1858static bool throtl_can_upgrade(struct throtl_data *td,
1859 struct throtl_grp *this_tg)
1860{
1861 struct cgroup_subsys_state *pos_css;
1862 struct blkcg_gq *blkg;
1863
1864 if (td->limit_index != LIMIT_LOW)
1865 return false;
1866
297e3d85 1867 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
3f0abd80
SL
1868 return false;
1869
c79892c5
SL
1870 rcu_read_lock();
1871 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1872 struct throtl_grp *tg = blkg_to_tg(blkg);
1873
1874 if (tg == this_tg)
1875 continue;
1876 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1877 continue;
1878 if (!throtl_hierarchy_can_upgrade(tg)) {
1879 rcu_read_unlock();
1880 return false;
1881 }
1882 }
1883 rcu_read_unlock();
1884 return true;
1885}
1886
fa6fb5aa
SL
1887static void throtl_upgrade_check(struct throtl_grp *tg)
1888{
1889 unsigned long now = jiffies;
1890
1891 if (tg->td->limit_index != LIMIT_LOW)
1892 return;
1893
1894 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1895 return;
1896
1897 tg->last_check_time = now;
1898
1899 if (!time_after_eq(now,
1900 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1901 return;
1902
1903 if (throtl_can_upgrade(tg->td, NULL))
1904 throtl_upgrade_state(tg->td);
1905}
1906
c79892c5
SL
1907static void throtl_upgrade_state(struct throtl_data *td)
1908{
1909 struct cgroup_subsys_state *pos_css;
1910 struct blkcg_gq *blkg;
1911
4cff729f 1912 throtl_log(&td->service_queue, "upgrade to max");
c79892c5 1913 td->limit_index = LIMIT_MAX;
3f0abd80 1914 td->low_upgrade_time = jiffies;
7394e31f 1915 td->scale = 0;
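	/*
	 * Kick every group once so bios queued under the low limits are
	 * re-dispatched against the max limits right away.
	 */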
c79892c5
SL
1916 rcu_read_lock();
1917 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1918 struct throtl_grp *tg = blkg_to_tg(blkg);
1919 struct throtl_service_queue *sq = &tg->service_queue;
1920
1921 tg->disptime = jiffies - 1;
1922 throtl_select_dispatch(sq);
4f02fb76 1923 throtl_schedule_next_dispatch(sq, true);
c79892c5
SL
1924 }
1925 rcu_read_unlock();
1926 throtl_select_dispatch(&td->service_queue);
4f02fb76 1927 throtl_schedule_next_dispatch(&td->service_queue, true);
c79892c5
SL
1928 queue_work(kthrotld_workqueue, &td->dispatch_work);
1929}
1930
3f0abd80
SL
1931static void throtl_downgrade_state(struct throtl_data *td, int new)
1932{
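	/*
	 * Back off gradually: halve the scale and, while it is still
	 * nonzero, only pull low_upgrade_time back by scale throttle
	 * slices instead of switching to the lower limit immediately.
	 */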
7394e31f
SL
1933 td->scale /= 2;
1934
4cff729f 1935 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
7394e31f
SL
1936 if (td->scale) {
1937 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1938 return;
1939 }
1940
3f0abd80
SL
1941 td->limit_index = new;
1942 td->low_downgrade_time = jiffies;
1943}
1944
1945static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1946{
1947 struct throtl_data *td = tg->td;
1948 unsigned long now = jiffies;
1949
1950 /*
1951 * If cgroup is below low limit, consider downgrade and throttle other
1952 * cgroups
1953 */
297e3d85
SL
1954 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1955 time_after_eq(now, tg_last_low_overflow_time(tg) +
fa6fb5aa
SL
1956 td->throtl_slice) &&
1957 (!throtl_tg_is_idle(tg) ||
1958 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
3f0abd80
SL
1959 return true;
1960 return false;
1961}
1962
1963static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1964{
1965 while (true) {
1966 if (!throtl_tg_can_downgrade(tg))
1967 return false;
1968 tg = sq_to_tg(tg->service_queue.parent_sq);
1969 if (!tg || !tg_to_blkg(tg)->parent)
1970 break;
1971 }
1972 return true;
1973}
1974
1975static void throtl_downgrade_check(struct throtl_grp *tg)
1976{
1977 uint64_t bps;
1978 unsigned int iops;
1979 unsigned long elapsed_time;
1980 unsigned long now = jiffies;
1981
1982 if (tg->td->limit_index != LIMIT_MAX ||
1983 !tg->td->limit_valid[LIMIT_LOW])
1984 return;
1985 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1986 return;
297e3d85 1987 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
3f0abd80
SL
1988 return;
1989
1990 elapsed_time = now - tg->last_check_time;
1991 tg->last_check_time = now;
1992
297e3d85
SL
1993 if (time_before(now, tg_last_low_overflow_time(tg) +
1994 tg->td->throtl_slice))
3f0abd80
SL
1995 return;
1996
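	/*
	 * Estimate the rates seen since the last check; reaching a low
	 * limit refreshes that direction's overflow timestamp.
	 */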
1997 if (tg->bps[READ][LIMIT_LOW]) {
1998 bps = tg->last_bytes_disp[READ] * HZ;
1999 do_div(bps, elapsed_time);
2000 if (bps >= tg->bps[READ][LIMIT_LOW])
2001 tg->last_low_overflow_time[READ] = now;
2002 }
2003
2004 if (tg->bps[WRITE][LIMIT_LOW]) {
2005 bps = tg->last_bytes_disp[WRITE] * HZ;
2006 do_div(bps, elapsed_time);
2007 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2008 tg->last_low_overflow_time[WRITE] = now;
2009 }
2010
2011 if (tg->iops[READ][LIMIT_LOW]) {
2012 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2013 if (iops >= tg->iops[READ][LIMIT_LOW])
2014 tg->last_low_overflow_time[READ] = now;
2015 }
2016
2017 if (tg->iops[WRITE][LIMIT_LOW]) {
2018 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2019 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2020 tg->last_low_overflow_time[WRITE] = now;
2021 }
2022
2023 /*
2024 * If cgroup is below low limit, consider downgrade and throttle other
2025 * cgroups
2026 */
2027 if (throtl_hierarchy_can_downgrade(tg))
2028 throtl_downgrade_state(tg->td, LIMIT_LOW);
2029
2030 tg->last_bytes_disp[READ] = 0;
2031 tg->last_bytes_disp[WRITE] = 0;
2032 tg->last_io_disp[READ] = 0;
2033 tg->last_io_disp[WRITE] = 0;
2034}
2035
9e234eea
SL
2036static void blk_throtl_update_idletime(struct throtl_grp *tg)
2037{
2038 unsigned long now = ktime_get_ns() >> 10;
2039 unsigned long last_finish_time = tg->last_finish_time;
2040
2041 if (now <= last_finish_time || last_finish_time == 0 ||
2042 last_finish_time == tg->checked_last_finish_time)
2043 return;
2044
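	/* exponential moving average: 7/8 of the history plus 1/8 of the new idle period */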
2045 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2046 tg->checked_last_finish_time = last_finish_time;
2047}
2048
b9147dd1
SL
2049#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2050static void throtl_update_latency_buckets(struct throtl_data *td)
2051{
b889bf66
JQ
2052 struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2053 int i, cpu, rw;
2054 unsigned long last_latency[2] = { 0 };
2055 unsigned long latency[2];
b9147dd1
SL
2056
2057 if (!blk_queue_nonrot(td->queue))
2058 return;
2059 if (time_before(jiffies, td->last_calculate_time + HZ))
2060 return;
2061 td->last_calculate_time = jiffies;
2062
2063 memset(avg_latency, 0, sizeof(avg_latency));
b889bf66
JQ
2064 for (rw = READ; rw <= WRITE; rw++) {
2065 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2066 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2067
2068 for_each_possible_cpu(cpu) {
2069 struct latency_bucket *bucket;
2070
2071 /* this isn't race free, but ok in practice */
2072 bucket = per_cpu_ptr(td->latency_buckets[rw],
2073 cpu);
2074 tmp->total_latency += bucket[i].total_latency;
2075 tmp->samples += bucket[i].samples;
2076 bucket[i].total_latency = 0;
2077 bucket[i].samples = 0;
2078 }
b9147dd1 2079
b889bf66
JQ
2080 if (tmp->samples >= 32) {
2081 int samples = tmp->samples;
b9147dd1 2082
b889bf66 2083 latency[rw] = tmp->total_latency;
b9147dd1 2084
b889bf66
JQ
2085 tmp->total_latency = 0;
2086 tmp->samples = 0;
2087 latency[rw] /= samples;
2088 if (latency[rw] == 0)
2089 continue;
2090 avg_latency[rw][i].latency = latency[rw];
2091 }
b9147dd1
SL
2092 }
2093 }
2094
b889bf66
JQ
2095 for (rw = READ; rw <= WRITE; rw++) {
2096 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2097 if (!avg_latency[rw][i].latency) {
2098 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2099 td->avg_buckets[rw][i].latency =
2100 last_latency[rw];
2101 continue;
2102 }
b9147dd1 2103
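			/* first valid sample seeds the bucket, later ones feed a 7/8-weighted moving average */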
b889bf66
JQ
2104 if (!td->avg_buckets[rw][i].valid)
2105 latency[rw] = avg_latency[rw][i].latency;
2106 else
2107 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2108 avg_latency[rw][i].latency) >> 3;
b9147dd1 2109
b889bf66
JQ
2110 td->avg_buckets[rw][i].latency = max(latency[rw],
2111 last_latency[rw]);
2112 td->avg_buckets[rw][i].valid = true;
2113 last_latency[rw] = td->avg_buckets[rw][i].latency;
2114 }
b9147dd1 2115 }
4cff729f
SL
2116
2117 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2118 throtl_log(&td->service_queue,
b889bf66
JQ
2119 "Latency bucket %d: read latency=%ld, read valid=%d, "
2120 "write latency=%ld, write valid=%d", i,
2121 td->avg_buckets[READ][i].latency,
2122 td->avg_buckets[READ][i].valid,
2123 td->avg_buckets[WRITE][i].latency,
2124 td->avg_buckets[WRITE][i].valid);
b9147dd1
SL
2125}
2126#else
2127static inline void throtl_update_latency_buckets(struct throtl_data *td)
2128{
2129}
2130#endif
2131
2bc19cd5
JA
2132static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2133{
2134#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
53cfdc10
JX
2135 if (bio->bi_css) {
2136 if (bio->bi_cg_private)
2137 blkg_put(tg_to_blkg(bio->bi_cg_private));
2bc19cd5 2138 bio->bi_cg_private = tg;
53cfdc10
JX
2139 blkg_get(tg_to_blkg(tg));
2140 }
5238dcf4 2141 bio_issue_init(&bio->bi_issue, bio_sectors(bio));
2bc19cd5
JA
2142#endif
2143}
2144
ae118896
TH
2145bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2146 struct bio *bio)
e43473b7 2147{
c5cc2070 2148 struct throtl_qnode *qn = NULL;
ae118896 2149 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
73f0d49a 2150 struct throtl_service_queue *sq;
0e9f4164 2151 bool rw = bio_data_dir(bio);
bc16a4f9 2152 bool throttled = false;
b9147dd1 2153 struct throtl_data *td = tg->td;
e43473b7 2154
ae118896
TH
2155 WARN_ON_ONCE(!rcu_read_lock_held());
2156
2a0f61e6 2157 /* see throtl_charge_bio() */
8d2bbd4c 2158 if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
bc16a4f9 2159 goto out;
e43473b7
VG
2160
2161 spin_lock_irq(q->queue_lock);
c9589f03 2162
b9147dd1
SL
2163 throtl_update_latency_buckets(td);
2164
c9589f03 2165 if (unlikely(blk_queue_bypass(q)))
bc16a4f9 2166 goto out_unlock;
f469a7b4 2167
2bc19cd5 2168 blk_throtl_assoc_bio(tg, bio);
9e234eea
SL
2169 blk_throtl_update_idletime(tg);
2170
73f0d49a
TH
2171 sq = &tg->service_queue;
2172
c79892c5 2173again:
9e660acf 2174 while (true) {
3f0abd80
SL
2175 if (tg->last_low_overflow_time[rw] == 0)
2176 tg->last_low_overflow_time[rw] = jiffies;
2177 throtl_downgrade_check(tg);
fa6fb5aa 2178 throtl_upgrade_check(tg);
9e660acf
TH
 2179		/* throtl is FIFO - if bios are already queued, this one should be queued too */
2180 if (sq->nr_queued[rw])
2181 break;
de701c74 2182
9e660acf 2183 /* if above limits, break to queue */
c79892c5 2184 if (!tg_may_dispatch(tg, bio, NULL)) {
3f0abd80 2185 tg->last_low_overflow_time[rw] = jiffies;
b9147dd1
SL
2186 if (throtl_can_upgrade(td, tg)) {
2187 throtl_upgrade_state(td);
c79892c5
SL
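				/* limits were just raised, re-check this level before queueing */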
2188 goto again;
2189 }
9e660acf 2190 break;
c79892c5 2191 }
9e660acf
TH
2192
2193 /* within limits, let's charge and dispatch directly */
e43473b7 2194 throtl_charge_bio(tg, bio);
04521db0
VG
2195
2196 /*
 2197		 * We need to trim the slice even when bios are not being
 2198		 * queued, otherwise a bio might not be queued for a long
 2199		 * time while the slice keeps extending and trim is never
 2200		 * called. If limits are then reduced suddenly, all the IO
 2201		 * dispatched so far is accounted at the new low rate and
 2202		 * newly queued IO gets a really long dispatch time.
2204 *
2205 * So keep on trimming slice even if bio is not queued.
2206 */
0f3457f6 2207 throtl_trim_slice(tg, rw);
9e660acf
TH
2208
2209 /*
2210 * @bio passed through this layer without being throttled.
 2211		 * Climb up the ladder. If we're already at the top, it
2212 * can be executed directly.
2213 */
c5cc2070 2214 qn = &tg->qnode_on_parent[rw];
9e660acf
TH
2215 sq = sq->parent_sq;
2216 tg = sq_to_tg(sq);
2217 if (!tg)
2218 goto out_unlock;
e43473b7
VG
2219 }
2220
9e660acf 2221 /* out-of-limit, queue to @tg */
fda6f272
TH
2222 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2223 rw == READ ? 'R' : 'W',
9f626e37
SL
2224 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2225 tg_bps_limit(tg, rw),
2226 tg->io_disp[rw], tg_iops_limit(tg, rw),
fda6f272 2227 sq->nr_queued[READ], sq->nr_queued[WRITE]);
e43473b7 2228
3f0abd80
SL
2229 tg->last_low_overflow_time[rw] = jiffies;
2230
b9147dd1 2231 td->nr_queued[rw]++;
c5cc2070 2232 throtl_add_bio_tg(bio, qn, tg);
bc16a4f9 2233 throttled = true;
e43473b7 2234
7f52f98c
TH
2235 /*
2236 * Update @tg's dispatch time and force schedule dispatch if @tg
2237 * was empty before @bio. The forced scheduling isn't likely to
2238 * cause undue delay as @bio is likely to be dispatched directly if
2239 * its @tg's disptime is not in the future.
2240 */
0e9f4164 2241 if (tg->flags & THROTL_TG_WAS_EMPTY) {
77216b04 2242 tg_update_disptime(tg);
7f52f98c 2243 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
e43473b7
VG
2244 }
2245
bc16a4f9 2246out_unlock:
e43473b7 2247 spin_unlock_irq(q->queue_lock);
bc16a4f9 2248out:
111be883 2249 bio_set_flag(bio, BIO_THROTTLED);
b9147dd1
SL
2250
2251#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2252 if (throttled || !td->track_bio_latency)
5238dcf4 2253 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
b9147dd1 2254#endif
bc16a4f9 2255 return throttled;
e43473b7
VG
2256}
2257
9e234eea 2258#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
b9147dd1
SL
2259static void throtl_track_latency(struct throtl_data *td, sector_t size,
2260 int op, unsigned long time)
2261{
2262 struct latency_bucket *latency;
2263 int index;
2264
b889bf66
JQ
2265 if (!td || td->limit_index != LIMIT_LOW ||
2266 !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
b9147dd1
SL
2267 !blk_queue_nonrot(td->queue))
2268 return;
2269
2270 index = request_bucket_index(size);
2271
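	/* accumulate the sample into this CPU's bucket for the request's size class */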
b889bf66 2272 latency = get_cpu_ptr(td->latency_buckets[op]);
b9147dd1
SL
2273 latency[index].total_latency += time;
2274 latency[index].samples++;
b889bf66 2275 put_cpu_ptr(td->latency_buckets[op]);
b9147dd1
SL
2276}
2277
2278void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2279{
2280 struct request_queue *q = rq->q;
2281 struct throtl_data *td = q->td;
2282
544ccc8d 2283 throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
b9147dd1
SL
2284}
2285
9e234eea
SL
2286void blk_throtl_bio_endio(struct bio *bio)
2287{
2288 struct throtl_grp *tg;
b9147dd1
SL
2289 u64 finish_time_ns;
2290 unsigned long finish_time;
2291 unsigned long start_time;
2292 unsigned long lat;
b889bf66 2293 int rw = bio_data_dir(bio);
9e234eea
SL
2294
2295 tg = bio->bi_cg_private;
2296 if (!tg)
2297 return;
2298 bio->bi_cg_private = NULL;
2299
b9147dd1
SL
2300 finish_time_ns = ktime_get_ns();
2301 tg->last_finish_time = finish_time_ns >> 10;
2302
5238dcf4
OS
2303 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2304 finish_time = __bio_issue_time(finish_time_ns) >> 10;
53cfdc10
JX
2305 if (!start_time || finish_time <= start_time) {
2306 blkg_put(tg_to_blkg(tg));
53696b8d 2307 return;
53cfdc10 2308 }
53696b8d
SL
2309
2310 lat = finish_time - start_time;
b9147dd1 2311 /* this is only for bio based driver */
5238dcf4
OS
2312 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2313 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2314 bio_op(bio), lat);
53696b8d 2315
6679a90c 2316 if (tg->latency_target && lat >= tg->td->filtered_latency) {
53696b8d
SL
2317 int bucket;
2318 unsigned int threshold;
2319
5238dcf4 2320 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
b889bf66 2321 threshold = tg->td->avg_buckets[rw][bucket].latency +
53696b8d
SL
2322 tg->latency_target;
2323 if (lat > threshold)
2324 tg->bad_bio_cnt++;
2325 /*
 2326		 * This is not race free, so the count could be wrong, which
 2327		 * at worst means cgroups get throttled
2328 */
2329 tg->bio_cnt++;
2330 }
2331
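	/* periodically halve the counters so that stale history decays */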
2332 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2333 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2334 tg->bio_cnt /= 2;
2335 tg->bad_bio_cnt /= 2;
b9147dd1 2336 }
53cfdc10
JX
2337
2338 blkg_put(tg_to_blkg(tg));
9e234eea
SL
2339}
2340#endif
2341
2a12f0dc
TH
2342/*
2343 * Dispatch all bios from all children tg's queued on @parent_sq. On
2344 * return, @parent_sq is guaranteed to not have any active children tg's
2345 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2346 */
2347static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2348{
2349 struct throtl_grp *tg;
2350
2351 while ((tg = throtl_rb_first(parent_sq))) {
2352 struct throtl_service_queue *sq = &tg->service_queue;
2353 struct bio *bio;
2354
2355 throtl_dequeue_tg(tg);
2356
c5cc2070 2357 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2a12f0dc 2358 tg_dispatch_one_bio(tg, bio_data_dir(bio));
c5cc2070 2359 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2a12f0dc
TH
2360 tg_dispatch_one_bio(tg, bio_data_dir(bio));
2361 }
2362}
2363
c9a929dd
TH
2364/**
2365 * blk_throtl_drain - drain throttled bios
2366 * @q: request_queue to drain throttled bios for
2367 *
2368 * Dispatch all currently throttled bios on @q through ->make_request_fn().
2369 */
2370void blk_throtl_drain(struct request_queue *q)
2371 __releases(q->queue_lock) __acquires(q->queue_lock)
2372{
2373 struct throtl_data *td = q->td;
2a12f0dc 2374 struct blkcg_gq *blkg;
492eb21b 2375 struct cgroup_subsys_state *pos_css;
c9a929dd 2376 struct bio *bio;
651930bc 2377 int rw;
c9a929dd 2378
8bcb6c7d 2379 queue_lockdep_assert_held(q);
2a12f0dc 2380 rcu_read_lock();
c9a929dd 2381
2a12f0dc
TH
2382 /*
2383 * Drain each tg while doing post-order walk on the blkg tree, so
2384 * that all bios are propagated to td->service_queue. It'd be
2385 * better to walk service_queue tree directly but blkg walk is
2386 * easier.
2387 */
492eb21b 2388 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2a12f0dc 2389 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
73f0d49a 2390
2a12f0dc
TH
2391 /* finally, transfer bios from top-level tg's into the td */
2392 tg_drain_bios(&td->service_queue);
2393
2394 rcu_read_unlock();
c9a929dd
TH
2395 spin_unlock_irq(q->queue_lock);
2396
2a12f0dc 2397 /* all bios now should be in td->service_queue, issue them */
651930bc 2398 for (rw = READ; rw <= WRITE; rw++)
c5cc2070
TH
2399 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2400 NULL)))
651930bc 2401 generic_make_request(bio);
c9a929dd
TH
2402
2403 spin_lock_irq(q->queue_lock);
2404}
2405
e43473b7
VG
2406int blk_throtl_init(struct request_queue *q)
2407{
2408 struct throtl_data *td;
a2b1693b 2409 int ret;
e43473b7
VG
2410
2411 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2412 if (!td)
2413 return -ENOMEM;
b889bf66 2414 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
b9147dd1 2415 LATENCY_BUCKET_SIZE, __alignof__(u64));
b889bf66
JQ
2416 if (!td->latency_buckets[READ]) {
2417 kfree(td);
2418 return -ENOMEM;
2419 }
2420 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
b9147dd1 2421 LATENCY_BUCKET_SIZE, __alignof__(u64));
b889bf66
JQ
2422 if (!td->latency_buckets[WRITE]) {
2423 free_percpu(td->latency_buckets[READ]);
b9147dd1
SL
2424 kfree(td);
2425 return -ENOMEM;
2426 }
e43473b7 2427
69df0ab0 2428 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
b2ce2643 2429 throtl_service_queue_init(&td->service_queue);
e43473b7 2430
cd1604fa 2431 q->td = td;
29b12589 2432 td->queue = q;
02977e4a 2433
9f626e37 2434 td->limit_valid[LIMIT_MAX] = true;
cd5ab1b0 2435 td->limit_index = LIMIT_MAX;
3f0abd80
SL
2436 td->low_upgrade_time = jiffies;
2437 td->low_downgrade_time = jiffies;
9e234eea 2438
a2b1693b 2439 /* activate policy */
3c798398 2440 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
b9147dd1 2441 if (ret) {
b889bf66
JQ
2442 free_percpu(td->latency_buckets[READ]);
2443 free_percpu(td->latency_buckets[WRITE]);
f51b802c 2444 kfree(td);
b9147dd1 2445 }
a2b1693b 2446 return ret;
e43473b7
VG
2447}
2448
2449void blk_throtl_exit(struct request_queue *q)
2450{
c875f4d0 2451 BUG_ON(!q->td);
da527770 2452 throtl_shutdown_wq(q);
3c798398 2453 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
b889bf66
JQ
2454 free_percpu(q->td->latency_buckets[READ]);
2455 free_percpu(q->td->latency_buckets[WRITE]);
c9a929dd 2456 kfree(q->td);
e43473b7
VG
2457}
2458
d61fcfa4
SL
2459void blk_throtl_register_queue(struct request_queue *q)
2460{
2461 struct throtl_data *td;
6679a90c 2462 int i;
d61fcfa4
SL
2463
2464 td = q->td;
2465 BUG_ON(!td);
2466
6679a90c 2467 if (blk_queue_nonrot(q)) {
d61fcfa4 2468 td->throtl_slice = DFL_THROTL_SLICE_SSD;
6679a90c
SL
2469 td->filtered_latency = LATENCY_FILTERED_SSD;
2470 } else {
d61fcfa4 2471 td->throtl_slice = DFL_THROTL_SLICE_HD;
6679a90c 2472 td->filtered_latency = LATENCY_FILTERED_HD;
b889bf66
JQ
2473 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2474 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2475 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2476 }
6679a90c 2477 }
d61fcfa4
SL
2478#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2479 /* if no low limit, use previous default */
2480 td->throtl_slice = DFL_THROTL_SLICE_HD;
2481#endif
9e234eea 2482
475a055e 2483 td->track_bio_latency = !queue_is_rq_based(q);
b9147dd1
SL
2484 if (!td->track_bio_latency)
2485 blk_stat_enable_accounting(q);
d61fcfa4
SL
2486}
2487
297e3d85
SL
2488#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2489ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2490{
2491 if (!q->td)
2492 return -EINVAL;
2493 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2494}
2495
2496ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2497 const char *page, size_t count)
2498{
2499 unsigned long v;
2500 unsigned long t;
2501
2502 if (!q->td)
2503 return -EINVAL;
2504 if (kstrtoul(page, 10, &v))
2505 return -EINVAL;
2506 t = msecs_to_jiffies(v);
2507 if (t == 0 || t > MAX_THROTL_SLICE)
2508 return -EINVAL;
2509 q->td->throtl_slice = t;
2510 return count;
2511}
2512#endif
2513
e43473b7
VG
2514static int __init throtl_init(void)
2515{
450adcbe
VG
2516 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2517 if (!kthrotld_workqueue)
2518 panic("Failed to create kthrotld\n");
2519
3c798398 2520 return blkcg_policy_register(&blkcg_policy_throtl);
e43473b7
VG
2521}
2522
2523module_init(throtl_init);