block: move blk-throtl fast path inline
linux-block.git: block/blk-throttle.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 #include "blk-cgroup-rwstat.h"
16 #include "blk-throttle.h"
17
18 /* Max dispatch from a group in 1 round */
19 #define THROTL_GRP_QUANTUM 8
20
21 /* Total max dispatch from all groups in one round */
22 #define THROTL_QUANTUM 32
23
24 /* Throttling is performed over a slice and after that slice is renewed */
25 #define DFL_THROTL_SLICE_HD (HZ / 10)
26 #define DFL_THROTL_SLICE_SSD (HZ / 50)
27 #define MAX_THROTL_SLICE (HZ)
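/*
 * For example: HZ / 10 and HZ / 50 correspond to a default slice of 100ms on
 * rotational disks and 20ms on SSDs, with MAX_THROTL_SLICE bounding the slice
 * length at one second.
 */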
28 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
29 #define MIN_THROTL_BPS (320 * 1024)
30 #define MIN_THROTL_IOPS (10)
31 #define DFL_LATENCY_TARGET (-1L)
32 #define DFL_IDLE_THRESHOLD (0)
33 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
34 #define LATENCY_FILTERED_SSD (0)
35 /*
36  * For HD, very small latencies come from sequential IO. Such IO tells us
37  * nothing about whether the cgroup's IO is impacted by others, so we ignore it.
38  */
39 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
40
41 /* A workqueue to queue throttle related work */
42 static struct workqueue_struct *kthrotld_workqueue;
43
44 enum tg_state_flags {
45         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
46         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
47 };
48
49 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
50
51 /* We measure latency for request sizes from <= 4k to >= 1M */
52 #define LATENCY_BUCKET_SIZE 9
53
54 struct latency_bucket {
55         unsigned long total_latency; /* ns / 1024 */
56         int samples;
57 };
58
59 struct avg_latency_bucket {
60         unsigned long latency; /* ns / 1024 */
61         bool valid;
62 };
63
64 struct throtl_data
65 {
66         /* service tree for active throtl groups */
67         struct throtl_service_queue service_queue;
68
69         struct request_queue *queue;
70
71         /* Total Number of queued bios on READ and WRITE lists */
72         unsigned int nr_queued[2];
73
74         unsigned int throtl_slice;
75
76         /* Work for dispatching throttled bios */
77         struct work_struct dispatch_work;
78         unsigned int limit_index;
79         bool limit_valid[LIMIT_CNT];
80
81         unsigned long low_upgrade_time;
82         unsigned long low_downgrade_time;
83
84         unsigned int scale;
85
86         struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
87         struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
88         struct latency_bucket __percpu *latency_buckets[2];
89         unsigned long last_calculate_time;
90         unsigned long filtered_latency;
91
92         bool track_bio_latency;
93 };
94
95 static void throtl_pending_timer_fn(struct timer_list *t);
96
97 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
98 {
99         return pd_to_blkg(&tg->pd);
100 }
101
102 /**
103  * sq_to_tg - return the throtl_grp the specified service queue belongs to
104  * @sq: the throtl_service_queue of interest
105  *
106  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
107  * embedded in throtl_data, %NULL is returned.
108  */
109 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
110 {
111         if (sq && sq->parent_sq)
112                 return container_of(sq, struct throtl_grp, service_queue);
113         else
114                 return NULL;
115 }
116
117 /**
118  * sq_to_td - return throtl_data the specified service queue belongs to
119  * @sq: the throtl_service_queue of interest
120  *
121  * A service_queue can be embedded in either a throtl_grp or throtl_data.
122  * Determine the associated throtl_data accordingly and return it.
123  */
124 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
125 {
126         struct throtl_grp *tg = sq_to_tg(sq);
127
128         if (tg)
129                 return tg->td;
130         else
131                 return container_of(sq, struct throtl_data, service_queue);
132 }
133
134 /*
135  * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. The scaling
136  * makes the IO dispatch smoother.
137  * Scale up: scale up linearly according to the time elapsed since upgrade. For
138  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
139  *           until it hits the .max limit
140  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
141  */
142 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
143 {
144         /* arbitrary cap to keep the scale from growing too large */
145         if (td->scale < 4096 && time_after_eq(jiffies,
146             td->low_upgrade_time + td->scale * td->throtl_slice))
147                 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
148
149         return low + (low >> 1) * td->scale;
150 }
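/*
 * For example, with a .low limit of 100 MiB/s and four throtl_slice periods
 * elapsed since the last upgrade, td->scale becomes 4 and the adjusted limit
 * is 100 MiB/s + (50 MiB/s * 4) = 300 MiB/s; the callers below then clamp
 * the result to the configured .max limit.
 */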
151
152 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
153 {
154         struct blkcg_gq *blkg = tg_to_blkg(tg);
155         struct throtl_data *td;
156         uint64_t ret;
157
158         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
159                 return U64_MAX;
160
161         td = tg->td;
162         ret = tg->bps[rw][td->limit_index];
163         if (ret == 0 && td->limit_index == LIMIT_LOW) {
164                 /* intermediate node or iops isn't 0 */
165                 if (!list_empty(&blkg->blkcg->css.children) ||
166                     tg->iops[rw][td->limit_index])
167                         return U64_MAX;
168                 else
169                         return MIN_THROTL_BPS;
170         }
171
172         if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
173             tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
174                 uint64_t adjusted;
175
176                 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
177                 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
178         }
179         return ret;
180 }
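/*
 * For the LIMIT_LOW case above: a leaf cgroup that configures only an iops
 * low limit keeps bps[LIMIT_LOW] at 0 and is treated as having unlimited bps
 * while the device is in the LIMIT_LOW state, whereas a leaf with neither
 * bps nor iops configured falls back to MIN_THROTL_BPS (320 KiB/s) so it is
 * never throttled to a standstill.
 */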
181
182 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
183 {
184         struct blkcg_gq *blkg = tg_to_blkg(tg);
185         struct throtl_data *td;
186         unsigned int ret;
187
188         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
189                 return UINT_MAX;
190
191         td = tg->td;
192         ret = tg->iops[rw][td->limit_index];
193         if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
194                 /* intermediate node or bps isn't 0 */
195                 if (!list_empty(&blkg->blkcg->css.children) ||
196                     tg->bps[rw][td->limit_index])
197                         return UINT_MAX;
198                 else
199                         return MIN_THROTL_IOPS;
200         }
201
202         if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
203             tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
204                 uint64_t adjusted;
205
206                 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
207                 if (adjusted > UINT_MAX)
208                         adjusted = UINT_MAX;
209                 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
210         }
211         return ret;
212 }
213
214 #define request_bucket_index(sectors) \
215         clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
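/*
 * For example, a 4k request (8 sectors) maps to bucket 0
 * (order_base_2(8) - 3), a 64k request (128 sectors) to bucket 4, and
 * anything 1M (2048 sectors) or larger is clamped to the last bucket, 8.
 */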
216
217 /**
218  * throtl_log - log debug message via blktrace
219  * @sq: the service_queue being reported
220  * @fmt: printf format string
221  * @args: printf args
222  *
223  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
224  * throtl_grp; otherwise, just "throtl".
225  */
226 #define throtl_log(sq, fmt, args...)    do {                            \
227         struct throtl_grp *__tg = sq_to_tg((sq));                       \
228         struct throtl_data *__td = sq_to_td((sq));                      \
229                                                                         \
230         (void)__td;                                                     \
231         if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
232                 break;                                                  \
233         if ((__tg)) {                                                   \
234                 blk_add_cgroup_trace_msg(__td->queue,                   \
235                         tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
236         } else {                                                        \
237                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
238         }                                                               \
239 } while (0)
240
241 static inline unsigned int throtl_bio_data_size(struct bio *bio)
242 {
243         /* assume it's one sector */
244         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
245                 return 512;
246         return bio->bi_iter.bi_size;
247 }
248
249 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
250 {
251         INIT_LIST_HEAD(&qn->node);
252         bio_list_init(&qn->bios);
253         qn->tg = tg;
254 }
255
256 /**
257  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
258  * @bio: bio being added
259  * @qn: qnode to add bio to
260  * @queued: the service_queue->queued[] list @qn belongs to
261  *
262  * Add @bio to @qn and put @qn on @queued if it's not already on.
263  * @qn->tg's reference count is bumped when @qn is activated.  See the
264  * comment on top of throtl_qnode definition for details.
265  */
266 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
267                                  struct list_head *queued)
268 {
269         bio_list_add(&qn->bios, bio);
270         if (list_empty(&qn->node)) {
271                 list_add_tail(&qn->node, queued);
272                 blkg_get(tg_to_blkg(qn->tg));
273         }
274 }
275
276 /**
277  * throtl_peek_queued - peek the first bio on a qnode list
278  * @queued: the qnode list to peek
279  */
280 static struct bio *throtl_peek_queued(struct list_head *queued)
281 {
282         struct throtl_qnode *qn;
283         struct bio *bio;
284
285         if (list_empty(queued))
286                 return NULL;
287
288         qn = list_first_entry(queued, struct throtl_qnode, node);
289         bio = bio_list_peek(&qn->bios);
290         WARN_ON_ONCE(!bio);
291         return bio;
292 }
293
294 /**
295  * throtl_pop_queued - pop the first bio from a qnode list
296  * @queued: the qnode list to pop a bio from
297  * @tg_to_put: optional out argument for throtl_grp to put
298  *
299  * Pop the first bio from the qnode list @queued.  After popping, the first
300  * qnode is removed from @queued if empty or moved to the end of @queued so
301  * that the popping order is round-robin.
302  *
303  * When the first qnode is removed, its associated throtl_grp should be put
304  * too.  If @tg_to_put is NULL, this function automatically puts it;
305  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
306  * responsible for putting it.
307  */
308 static struct bio *throtl_pop_queued(struct list_head *queued,
309                                      struct throtl_grp **tg_to_put)
310 {
311         struct throtl_qnode *qn;
312         struct bio *bio;
313
314         if (list_empty(queued))
315                 return NULL;
316
317         qn = list_first_entry(queued, struct throtl_qnode, node);
318         bio = bio_list_pop(&qn->bios);
319         WARN_ON_ONCE(!bio);
320
321         if (bio_list_empty(&qn->bios)) {
322                 list_del_init(&qn->node);
323                 if (tg_to_put)
324                         *tg_to_put = qn->tg;
325                 else
326                         blkg_put(tg_to_blkg(qn->tg));
327         } else {
328                 list_move_tail(&qn->node, queued);
329         }
330
331         return bio;
332 }
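/*
 * For example, if @queued holds qnodes A and B that each have bios pending,
 * successive calls return A's first bio (A is then moved to the tail), then
 * B's first bio, then A's next one, and so on, so no single child group can
 * monopolise the parent's dispatch.
 */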
333
334 /* init a service_queue, assumes the caller zeroed it */
335 static void throtl_service_queue_init(struct throtl_service_queue *sq)
336 {
337         INIT_LIST_HEAD(&sq->queued[0]);
338         INIT_LIST_HEAD(&sq->queued[1]);
339         sq->pending_tree = RB_ROOT_CACHED;
340         timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
341 }
342
343 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
344                                                 struct request_queue *q,
345                                                 struct blkcg *blkcg)
346 {
347         struct throtl_grp *tg;
348         int rw;
349
350         tg = kzalloc_node(sizeof(*tg), gfp, q->node);
351         if (!tg)
352                 return NULL;
353
354         if (blkg_rwstat_init(&tg->stat_bytes, gfp))
355                 goto err_free_tg;
356
357         if (blkg_rwstat_init(&tg->stat_ios, gfp))
358                 goto err_exit_stat_bytes;
359
360         throtl_service_queue_init(&tg->service_queue);
361
362         for (rw = READ; rw <= WRITE; rw++) {
363                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
364                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
365         }
366
367         RB_CLEAR_NODE(&tg->rb_node);
368         tg->bps[READ][LIMIT_MAX] = U64_MAX;
369         tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
370         tg->iops[READ][LIMIT_MAX] = UINT_MAX;
371         tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
372         tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
373         tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
374         tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
375         tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
376         /* LIMIT_LOW will have default value 0 */
377
378         tg->latency_target = DFL_LATENCY_TARGET;
379         tg->latency_target_conf = DFL_LATENCY_TARGET;
380         tg->idletime_threshold = DFL_IDLE_THRESHOLD;
381         tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
382
383         return &tg->pd;
384
385 err_exit_stat_bytes:
386         blkg_rwstat_exit(&tg->stat_bytes);
387 err_free_tg:
388         kfree(tg);
389         return NULL;
390 }
391
392 static void throtl_pd_init(struct blkg_policy_data *pd)
393 {
394         struct throtl_grp *tg = pd_to_tg(pd);
395         struct blkcg_gq *blkg = tg_to_blkg(tg);
396         struct throtl_data *td = blkg->q->td;
397         struct throtl_service_queue *sq = &tg->service_queue;
398
399         /*
400          * If on the default hierarchy, we switch to properly hierarchical
401          * behavior where limits on a given throtl_grp are applied to the
402          * whole subtree rather than just the group itself.  e.g. If 16M
403          * read_bps limit is set on the root group, the whole system can't
404          * exceed 16M for the device.
405          *
406          * If not on the default hierarchy, the broken flat hierarchy
407          * behavior is retained where all throtl_grps are treated as if
408          * they're all separate root groups right below throtl_data.
409          * Limits of a group don't interact with limits of other groups
410          * regardless of the position of the group in the hierarchy.
411          */
412         sq->parent_sq = &td->service_queue;
413         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
414                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
415         tg->td = td;
416 }
417
418 /*
419  * Set has_rules[] if @tg or any of its parents have limits configured.
420  * This doesn't require walking up to the top of the hierarchy as the
421  * parent's has_rules[] is guaranteed to be correct.
422  */
423 static void tg_update_has_rules(struct throtl_grp *tg)
424 {
425         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
426         struct throtl_data *td = tg->td;
427         int rw;
428
429         for (rw = READ; rw <= WRITE; rw++)
430                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
431                         (td->limit_valid[td->limit_index] &&
432                          (tg_bps_limit(tg, rw) != U64_MAX ||
433                           tg_iops_limit(tg, rw) != UINT_MAX));
434 }
435
436 static void throtl_pd_online(struct blkg_policy_data *pd)
437 {
438         struct throtl_grp *tg = pd_to_tg(pd);
439         /*
440          * We don't want new groups to escape the limits of their ancestors.
441          * Update has_rules[] after a new group is brought online.
442          */
443         tg_update_has_rules(tg);
444 }
445
446 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
447 static void blk_throtl_update_limit_valid(struct throtl_data *td)
448 {
449         struct cgroup_subsys_state *pos_css;
450         struct blkcg_gq *blkg;
451         bool low_valid = false;
452
453         rcu_read_lock();
454         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
455                 struct throtl_grp *tg = blkg_to_tg(blkg);
456
457                 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458                     tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
459                         low_valid = true;
460                         break;
461                 }
462         }
463         rcu_read_unlock();
464
465         td->limit_valid[LIMIT_LOW] = low_valid;
466 }
467 #else
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
469 {
470 }
471 #endif
472
473 static void throtl_upgrade_state(struct throtl_data *td);
474 static void throtl_pd_offline(struct blkg_policy_data *pd)
475 {
476         struct throtl_grp *tg = pd_to_tg(pd);
477
478         tg->bps[READ][LIMIT_LOW] = 0;
479         tg->bps[WRITE][LIMIT_LOW] = 0;
480         tg->iops[READ][LIMIT_LOW] = 0;
481         tg->iops[WRITE][LIMIT_LOW] = 0;
482
483         blk_throtl_update_limit_valid(tg->td);
484
485         if (!tg->td->limit_valid[tg->td->limit_index])
486                 throtl_upgrade_state(tg->td);
487 }
488
489 static void throtl_pd_free(struct blkg_policy_data *pd)
490 {
491         struct throtl_grp *tg = pd_to_tg(pd);
492
493         del_timer_sync(&tg->service_queue.pending_timer);
494         blkg_rwstat_exit(&tg->stat_bytes);
495         blkg_rwstat_exit(&tg->stat_ios);
496         kfree(tg);
497 }
498
499 static struct throtl_grp *
500 throtl_rb_first(struct throtl_service_queue *parent_sq)
501 {
502         struct rb_node *n;
503
504         n = rb_first_cached(&parent_sq->pending_tree);
505         WARN_ON_ONCE(!n);
506         if (!n)
507                 return NULL;
508         return rb_entry_tg(n);
509 }
510
511 static void throtl_rb_erase(struct rb_node *n,
512                             struct throtl_service_queue *parent_sq)
513 {
514         rb_erase_cached(n, &parent_sq->pending_tree);
515         RB_CLEAR_NODE(n);
516         --parent_sq->nr_pending;
517 }
518
519 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
520 {
521         struct throtl_grp *tg;
522
523         tg = throtl_rb_first(parent_sq);
524         if (!tg)
525                 return;
526
527         parent_sq->first_pending_disptime = tg->disptime;
528 }
529
530 static void tg_service_queue_add(struct throtl_grp *tg)
531 {
532         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
533         struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
534         struct rb_node *parent = NULL;
535         struct throtl_grp *__tg;
536         unsigned long key = tg->disptime;
537         bool leftmost = true;
538
539         while (*node != NULL) {
540                 parent = *node;
541                 __tg = rb_entry_tg(parent);
542
543                 if (time_before(key, __tg->disptime))
544                         node = &parent->rb_left;
545                 else {
546                         node = &parent->rb_right;
547                         leftmost = false;
548                 }
549         }
550
551         rb_link_node(&tg->rb_node, parent, node);
552         rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
553                                leftmost);
554 }
555
556 static void throtl_enqueue_tg(struct throtl_grp *tg)
557 {
558         if (!(tg->flags & THROTL_TG_PENDING)) {
559                 tg_service_queue_add(tg);
560                 tg->flags |= THROTL_TG_PENDING;
561                 tg->service_queue.parent_sq->nr_pending++;
562         }
563 }
564
565 static void throtl_dequeue_tg(struct throtl_grp *tg)
566 {
567         if (tg->flags & THROTL_TG_PENDING) {
568                 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
569                 tg->flags &= ~THROTL_TG_PENDING;
570         }
571 }
572
573 /* Call with queue lock held */
574 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
575                                           unsigned long expires)
576 {
577         unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
578
579         /*
580          * Since we are adjusting the throttle limit dynamically, the sleep
581          * time calculated according to the previous limit might be invalid. It's
582          * possible the cgroup's sleep time is very long while no other cgroup
583          * has IO running, so nothing would notice the limit change. Make sure
584          * the cgroup doesn't sleep too long so the change isn't missed.
585          */
586         if (time_after(expires, max_expire))
587                 expires = max_expire;
588         mod_timer(&sq->pending_timer, expires);
589         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
590                    expires - jiffies, jiffies);
591 }
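/*
 * With the default 100ms throtl_slice on rotational disks, for example, the
 * timer is never armed more than 8 * 100ms = 800ms out, which bounds how long
 * a sleep time computed under a stale limit can go unnoticed.
 */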
592
593 /**
594  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
595  * @sq: the service_queue to schedule dispatch for
596  * @force: force scheduling
597  *
598  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
599  * dispatch time of the first pending child.  Returns %true if either timer
600  * is armed or there's no pending child left.  %false if the current
601  * dispatch window is still open and the caller should continue
602  * dispatching.
603  *
604  * If @force is %true, the dispatch timer is always scheduled and this
605  * function is guaranteed to return %true.  This is to be used when the
606  * caller can't dispatch itself and needs to invoke pending_timer
607  * unconditionally.  Note that forced scheduling is likely to induce short
608  * delay before dispatch starts even if @sq->first_pending_disptime is not
609  * in the future and thus shouldn't be used in hot paths.
610  */
611 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
612                                           bool force)
613 {
614         /* any pending children left? */
615         if (!sq->nr_pending)
616                 return true;
617
618         update_min_dispatch_time(sq);
619
620         /* is the next dispatch time in the future? */
621         if (force || time_after(sq->first_pending_disptime, jiffies)) {
622                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
623                 return true;
624         }
625
626         /* tell the caller to continue dispatching */
627         return false;
628 }
629
630 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
631                 bool rw, unsigned long start)
632 {
633         tg->bytes_disp[rw] = 0;
634         tg->io_disp[rw] = 0;
635
636         atomic_set(&tg->io_split_cnt[rw], 0);
637
638         /*
639          * The previous slice has expired. We must have trimmed it after the
640          * last bio dispatch. That means that since the start of the last slice,
641          * we never used that bandwidth. Do try to make use of it while giving
642          * credit.
643          */
644         if (time_after_eq(start, tg->slice_start[rw]))
645                 tg->slice_start[rw] = start;
646
647         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
648         throtl_log(&tg->service_queue,
649                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
650                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
651                    tg->slice_end[rw], jiffies);
652 }
653
654 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
655 {
656         tg->bytes_disp[rw] = 0;
657         tg->io_disp[rw] = 0;
658         tg->slice_start[rw] = jiffies;
659         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
660
661         atomic_set(&tg->io_split_cnt[rw], 0);
662
663         throtl_log(&tg->service_queue,
664                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
665                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
666                    tg->slice_end[rw], jiffies);
667 }
668
669 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
670                                         unsigned long jiffy_end)
671 {
672         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
673 }
674
675 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
676                                        unsigned long jiffy_end)
677 {
678         throtl_set_slice_end(tg, rw, jiffy_end);
679         throtl_log(&tg->service_queue,
680                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
681                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
682                    tg->slice_end[rw], jiffies);
683 }
684
685 /* Determine if previously allocated or extended slice is complete or not */
686 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
687 {
688         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
689                 return false;
690
691         return true;
692 }
693
694 /* Trim the used slices and adjust slice start accordingly */
695 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
696 {
697         unsigned long nr_slices, time_elapsed, io_trim;
698         u64 bytes_trim, tmp;
699
700         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
701
702         /*
703          * If bps is unlimited (-1), then the time slice doesn't get
704          * renewed. Don't try to trim the slice if the slice is used up. A new
705          * slice will start when appropriate.
706          */
707         if (throtl_slice_used(tg, rw))
708                 return;
709
710         /*
711          * A bio has been dispatched, so also adjust slice_end. It might happen
712          * that the cgroup limit was initially very low, resulting in a high
713          * slice_end, but the limit was later bumped up and the bio was dispatched
714          * sooner; in that case we need to reduce slice_end. A bogus high slice_end
715          * is bad because it does not allow a new slice to start.
716          */
717
718         throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
719
720         time_elapsed = jiffies - tg->slice_start[rw];
721
722         nr_slices = time_elapsed / tg->td->throtl_slice;
723
724         if (!nr_slices)
725                 return;
726         tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
727         do_div(tmp, HZ);
728         bytes_trim = tmp;
729
730         io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
731                 HZ;
732
733         if (!bytes_trim && !io_trim)
734                 return;
735
736         if (tg->bytes_disp[rw] >= bytes_trim)
737                 tg->bytes_disp[rw] -= bytes_trim;
738         else
739                 tg->bytes_disp[rw] = 0;
740
741         if (tg->io_disp[rw] >= io_trim)
742                 tg->io_disp[rw] -= io_trim;
743         else
744                 tg->io_disp[rw] = 0;
745
746         tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
747
748         throtl_log(&tg->service_queue,
749                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
750                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
751                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
752 }
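/*
 * For example, with a 1 MiB/s bps limit and a 100ms throtl_slice, three
 * elapsed slices trim roughly 300 KiB from bytes_disp (1 MiB * 300ms / 1s)
 * and advance slice_start by 300ms, so unused budget from old slices cannot
 * be hoarded and spent as a burst later.
 */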
753
754 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
755                                   u32 iops_limit, unsigned long *wait)
756 {
757         bool rw = bio_data_dir(bio);
758         unsigned int io_allowed;
759         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
760         u64 tmp;
761
762         if (iops_limit == UINT_MAX) {
763                 if (wait)
764                         *wait = 0;
765                 return true;
766         }
767
768         jiffy_elapsed = jiffies - tg->slice_start[rw];
769
770         /* Round up to the next throttle slice, wait time must be nonzero */
771         jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
772
773         /*
774          * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
775          * 1, so at most jiffy_elapsed_rnd is equivalent to 1 second, as we
776          * will allow dispatch after 1 second, and after that the slice should
777          * have been trimmed.
778          */
779
780         tmp = (u64)iops_limit * jiffy_elapsed_rnd;
781         do_div(tmp, HZ);
782
783         if (tmp > UINT_MAX)
784                 io_allowed = UINT_MAX;
785         else
786                 io_allowed = tmp;
787
788         if (tg->io_disp[rw] + 1 <= io_allowed) {
789                 if (wait)
790                         *wait = 0;
791                 return true;
792         }
793
794         /* Calc approx time to dispatch */
795         jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
796
797         if (wait)
798                 *wait = jiffy_wait;
799         return false;
800 }
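/*
 * For example, with iops_limit = 10 and a 100ms throtl_slice, a bio arriving
 * right at slice start sees jiffy_elapsed_rnd = 100ms and io_allowed =
 * 10 * 100ms / 1s = 1: the first IO in the slice goes through and the next
 * one has to wait the remaining ~100ms of the rounded-up slice.
 */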
801
802 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
803                                  u64 bps_limit, unsigned long *wait)
804 {
805         bool rw = bio_data_dir(bio);
806         u64 bytes_allowed, extra_bytes, tmp;
807         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
808         unsigned int bio_size = throtl_bio_data_size(bio);
809
810         if (bps_limit == U64_MAX) {
811                 if (wait)
812                         *wait = 0;
813                 return true;
814         }
815
816         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
817
818         /* Slice has just started. Consider one slice interval */
819         if (!jiffy_elapsed)
820                 jiffy_elapsed_rnd = tg->td->throtl_slice;
821
822         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
823
824         tmp = bps_limit * jiffy_elapsed_rnd;
825         do_div(tmp, HZ);
826         bytes_allowed = tmp;
827
828         if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
829                 if (wait)
830                         *wait = 0;
831                 return true;
832         }
833
834         /* Calc approx time to dispatch */
835         extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
836         jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
837
838         if (!jiffy_wait)
839                 jiffy_wait = 1;
840
841         /*
842          * This wait time does not take into account the rounding up we did
843          * above. Add that time as well.
844          */
845         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
846         if (wait)
847                 *wait = jiffy_wait;
848         return false;
849 }
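/*
 * For example, with bps_limit = 1 MiB/s and a 1 MiB bio arriving at the start
 * of a fresh 100ms slice, bytes_allowed is roughly 102 KiB, extra_bytes is
 * roughly 922 KiB and jiffy_wait comes out to about 900ms; adding the 100ms
 * of slice rounding gives a total wait of about one second, the time needed
 * to "earn" the whole bio at the configured rate.
 */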
850
851 /*
852  * Returns whether one can dispatch a bio or not. Also returns the approximate
853  * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
854  */
855 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
856                             unsigned long *wait)
857 {
858         bool rw = bio_data_dir(bio);
859         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
860         u64 bps_limit = tg_bps_limit(tg, rw);
861         u32 iops_limit = tg_iops_limit(tg, rw);
862
863         /*
864          * Currently the whole state machine of the group depends on the first
865          * bio queued in the group's bio list. So one should not be calling
866          * this function with a different bio if there are other bios
867          * queued.
868          */
869         BUG_ON(tg->service_queue.nr_queued[rw] &&
870                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
871
872         /* If tg->bps = -1, then BW is unlimited */
873         if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
874                 if (wait)
875                         *wait = 0;
876                 return true;
877         }
878
879         /*
880          * If the previous slice expired, start a new one; otherwise renew/extend
881          * the existing slice to make sure it is at least throtl_slice interval
882          * long since now. A new slice is started only for an empty throttle group.
883          * If there is a queued bio, there should already be an active
884          * slice and it should be extended instead.
885          */
886         if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
887                 throtl_start_new_slice(tg, rw);
888         else {
889                 if (time_before(tg->slice_end[rw],
890                     jiffies + tg->td->throtl_slice))
891                         throtl_extend_slice(tg, rw,
892                                 jiffies + tg->td->throtl_slice);
893         }
894
895         if (iops_limit != UINT_MAX)
896                 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
897
898         if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
899             tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
900                 if (wait)
901                         *wait = 0;
902                 return true;
903         }
904
905         max_wait = max(bps_wait, iops_wait);
906
907         if (wait)
908                 *wait = max_wait;
909
910         if (time_before(tg->slice_end[rw], jiffies + max_wait))
911                 throtl_extend_slice(tg, rw, jiffies + max_wait);
912
913         return false;
914 }
915
916 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
917 {
918         bool rw = bio_data_dir(bio);
919         unsigned int bio_size = throtl_bio_data_size(bio);
920
921         /* Charge the bio to the group */
922         tg->bytes_disp[rw] += bio_size;
923         tg->io_disp[rw]++;
924         tg->last_bytes_disp[rw] += bio_size;
925         tg->last_io_disp[rw]++;
926
927         /*
928          * BIO_THROTTLED is used to prevent the same bio from being throttled
929          * more than once as a throttled bio will go through blk-throtl the
930          * second time when it eventually gets issued.  Set it when a bio
931          * is being charged to a tg.
932          */
933         if (!bio_flagged(bio, BIO_THROTTLED))
934                 bio_set_flag(bio, BIO_THROTTLED);
935 }
936
937 /**
938  * throtl_add_bio_tg - add a bio to the specified throtl_grp
939  * @bio: bio to add
940  * @qn: qnode to use
941  * @tg: the target throtl_grp
942  *
943  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
944  * tg->qnode_on_self[] is used.
945  */
946 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
947                               struct throtl_grp *tg)
948 {
949         struct throtl_service_queue *sq = &tg->service_queue;
950         bool rw = bio_data_dir(bio);
951
952         if (!qn)
953                 qn = &tg->qnode_on_self[rw];
954
955         /*
956          * If @tg doesn't currently have any bios queued in the same
957          * direction, queueing @bio can change when @tg should be
958          * dispatched.  Mark that @tg was empty.  This is automatically
959          * cleared on the next tg_update_disptime().
960          */
961         if (!sq->nr_queued[rw])
962                 tg->flags |= THROTL_TG_WAS_EMPTY;
963
964         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
965
966         sq->nr_queued[rw]++;
967         throtl_enqueue_tg(tg);
968 }
969
970 static void tg_update_disptime(struct throtl_grp *tg)
971 {
972         struct throtl_service_queue *sq = &tg->service_queue;
973         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
974         struct bio *bio;
975
976         bio = throtl_peek_queued(&sq->queued[READ]);
977         if (bio)
978                 tg_may_dispatch(tg, bio, &read_wait);
979
980         bio = throtl_peek_queued(&sq->queued[WRITE]);
981         if (bio)
982                 tg_may_dispatch(tg, bio, &write_wait);
983
984         min_wait = min(read_wait, write_wait);
985         disptime = jiffies + min_wait;
986
987         /* Update dispatch time */
988         throtl_dequeue_tg(tg);
989         tg->disptime = disptime;
990         throtl_enqueue_tg(tg);
991
992         /* see throtl_add_bio_tg() */
993         tg->flags &= ~THROTL_TG_WAS_EMPTY;
994 }
995
996 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
997                                         struct throtl_grp *parent_tg, bool rw)
998 {
999         if (throtl_slice_used(parent_tg, rw)) {
1000                 throtl_start_new_slice_with_credit(parent_tg, rw,
1001                                 child_tg->slice_start[rw]);
1002         }
1003
1004 }
1005
1006 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1007 {
1008         struct throtl_service_queue *sq = &tg->service_queue;
1009         struct throtl_service_queue *parent_sq = sq->parent_sq;
1010         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1011         struct throtl_grp *tg_to_put = NULL;
1012         struct bio *bio;
1013
1014         /*
1015          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1016          * from @tg may put its reference and @parent_sq might end up
1017          * getting released prematurely.  Remember the tg to put and put it
1018          * after @bio is transferred to @parent_sq.
1019          */
1020         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1021         sq->nr_queued[rw]--;
1022
1023         throtl_charge_bio(tg, bio);
1024
1025         /*
1026          * If our parent is another tg, we just need to transfer @bio to
1027          * the parent using throtl_add_bio_tg().  If our parent is
1028          * @td->service_queue, @bio is ready to be issued.  Put it on its
1029          * bio_lists[] and decrease total number queued.  The caller is
1030          * responsible for issuing these bios.
1031          */
1032         if (parent_tg) {
1033                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1034                 start_parent_slice_with_credit(tg, parent_tg, rw);
1035         } else {
1036                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1037                                      &parent_sq->queued[rw]);
1038                 BUG_ON(tg->td->nr_queued[rw] <= 0);
1039                 tg->td->nr_queued[rw]--;
1040         }
1041
1042         throtl_trim_slice(tg, rw);
1043
1044         if (tg_to_put)
1045                 blkg_put(tg_to_blkg(tg_to_put));
1046 }
1047
1048 static int throtl_dispatch_tg(struct throtl_grp *tg)
1049 {
1050         struct throtl_service_queue *sq = &tg->service_queue;
1051         unsigned int nr_reads = 0, nr_writes = 0;
1052         unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1053         unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1054         struct bio *bio;
1055
1056         /* Try to dispatch 75% READS and 25% WRITES */
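        /* i.e. with THROTL_GRP_QUANTUM == 8, at most 6 reads and 2 writes per call */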
1057
1058         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1059                tg_may_dispatch(tg, bio, NULL)) {
1060
1061                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1062                 nr_reads++;
1063
1064                 if (nr_reads >= max_nr_reads)
1065                         break;
1066         }
1067
1068         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1069                tg_may_dispatch(tg, bio, NULL)) {
1070
1071                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1072                 nr_writes++;
1073
1074                 if (nr_writes >= max_nr_writes)
1075                         break;
1076         }
1077
1078         return nr_reads + nr_writes;
1079 }
1080
1081 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1082 {
1083         unsigned int nr_disp = 0;
1084
1085         while (1) {
1086                 struct throtl_grp *tg;
1087                 struct throtl_service_queue *sq;
1088
1089                 if (!parent_sq->nr_pending)
1090                         break;
1091
1092                 tg = throtl_rb_first(parent_sq);
1093                 if (!tg)
1094                         break;
1095
1096                 if (time_before(jiffies, tg->disptime))
1097                         break;
1098
1099                 throtl_dequeue_tg(tg);
1100
1101                 nr_disp += throtl_dispatch_tg(tg);
1102
1103                 sq = &tg->service_queue;
1104                 if (sq->nr_queued[0] || sq->nr_queued[1])
1105                         tg_update_disptime(tg);
1106
1107                 if (nr_disp >= THROTL_QUANTUM)
1108                         break;
1109         }
1110
1111         return nr_disp;
1112 }
1113
1114 static bool throtl_can_upgrade(struct throtl_data *td,
1115         struct throtl_grp *this_tg);
1116 /**
1117  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1118  * @t: the pending_timer member of the throtl_service_queue being serviced
1119  *
1120  * This timer is armed when a child throtl_grp with active bios becomes
1121  * pending and is queued on the service_queue's pending_tree, and expires when
1122  * the first child throtl_grp should be dispatched.  This function
1123  * dispatches bios from the children throtl_grps to the parent
1124  * service_queue.
1125  *
1126  * If the parent's parent is another throtl_grp, dispatching is propagated
1127  * by either arming its pending_timer or repeating dispatch directly.  If
1128  * the top-level service_tree is reached, throtl_data->dispatch_work is
1129  * kicked so that the ready bios are issued.
1130  */
1131 static void throtl_pending_timer_fn(struct timer_list *t)
1132 {
1133         struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1134         struct throtl_grp *tg = sq_to_tg(sq);
1135         struct throtl_data *td = sq_to_td(sq);
1136         struct request_queue *q = td->queue;
1137         struct throtl_service_queue *parent_sq;
1138         bool dispatched;
1139         int ret;
1140
1141         spin_lock_irq(&q->queue_lock);
1142         if (throtl_can_upgrade(td, NULL))
1143                 throtl_upgrade_state(td);
1144
1145 again:
1146         parent_sq = sq->parent_sq;
1147         dispatched = false;
1148
1149         while (true) {
1150                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1151                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1152                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1153
1154                 ret = throtl_select_dispatch(sq);
1155                 if (ret) {
1156                         throtl_log(sq, "bios disp=%u", ret);
1157                         dispatched = true;
1158                 }
1159
1160                 if (throtl_schedule_next_dispatch(sq, false))
1161                         break;
1162
1163                 /* this dispatch window is still open, relax and repeat */
1164                 spin_unlock_irq(&q->queue_lock);
1165                 cpu_relax();
1166                 spin_lock_irq(&q->queue_lock);
1167         }
1168
1169         if (!dispatched)
1170                 goto out_unlock;
1171
1172         if (parent_sq) {
1173                 /* @parent_sq is another throtl_grp, propagate dispatch */
1174                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1175                         tg_update_disptime(tg);
1176                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1177                                 /* window is already open, repeat dispatching */
1178                                 sq = parent_sq;
1179                                 tg = sq_to_tg(sq);
1180                                 goto again;
1181                         }
1182                 }
1183         } else {
1184                 /* reached the top-level, queue issuing */
1185                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1186         }
1187 out_unlock:
1188         spin_unlock_irq(&q->queue_lock);
1189 }
1190
1191 /**
1192  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1193  * @work: work item being executed
1194  *
1195  * This function is queued for execution when bios reach the bio_lists[]
1196  * of throtl_data->service_queue.  Those bios are ready and issued by this
1197  * function.
1198  */
1199 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1200 {
1201         struct throtl_data *td = container_of(work, struct throtl_data,
1202                                               dispatch_work);
1203         struct throtl_service_queue *td_sq = &td->service_queue;
1204         struct request_queue *q = td->queue;
1205         struct bio_list bio_list_on_stack;
1206         struct bio *bio;
1207         struct blk_plug plug;
1208         int rw;
1209
1210         bio_list_init(&bio_list_on_stack);
1211
1212         spin_lock_irq(&q->queue_lock);
1213         for (rw = READ; rw <= WRITE; rw++)
1214                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1215                         bio_list_add(&bio_list_on_stack, bio);
1216         spin_unlock_irq(&q->queue_lock);
1217
1218         if (!bio_list_empty(&bio_list_on_stack)) {
1219                 blk_start_plug(&plug);
1220                 while ((bio = bio_list_pop(&bio_list_on_stack)))
1221                         submit_bio_noacct(bio);
1222                 blk_finish_plug(&plug);
1223         }
1224 }
1225
1226 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1227                               int off)
1228 {
1229         struct throtl_grp *tg = pd_to_tg(pd);
1230         u64 v = *(u64 *)((void *)tg + off);
1231
1232         if (v == U64_MAX)
1233                 return 0;
1234         return __blkg_prfill_u64(sf, pd, v);
1235 }
1236
1237 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1238                                int off)
1239 {
1240         struct throtl_grp *tg = pd_to_tg(pd);
1241         unsigned int v = *(unsigned int *)((void *)tg + off);
1242
1243         if (v == UINT_MAX)
1244                 return 0;
1245         return __blkg_prfill_u64(sf, pd, v);
1246 }
1247
1248 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1249 {
1250         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1251                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1252         return 0;
1253 }
1254
1255 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1256 {
1257         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1258                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1259         return 0;
1260 }
1261
1262 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1263 {
1264         struct throtl_service_queue *sq = &tg->service_queue;
1265         struct cgroup_subsys_state *pos_css;
1266         struct blkcg_gq *blkg;
1267
1268         throtl_log(&tg->service_queue,
1269                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1270                    tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1271                    tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1272
1273         /*
1274          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1275          * considered to have rules if either the tg itself or any of its
1276          * ancestors has rules.  This identifies groups without any
1277          * restrictions in the whole hierarchy and allows them to bypass
1278          * blk-throttle.
1279          */
1280         blkg_for_each_descendant_pre(blkg, pos_css,
1281                         global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1282                 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1283                 struct throtl_grp *parent_tg;
1284
1285                 tg_update_has_rules(this_tg);
1286                 /* ignore root/second level */
1287                 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1288                     !blkg->parent->parent)
1289                         continue;
1290                 parent_tg = blkg_to_tg(blkg->parent);
1291                 /*
1292                  * make sure all children have a lower idle time threshold and
1293                  * a higher latency target
1294                  */
1295                 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1296                                 parent_tg->idletime_threshold);
1297                 this_tg->latency_target = max(this_tg->latency_target,
1298                                 parent_tg->latency_target);
1299         }
1300
1301         /*
1302          * We're already holding queue_lock and know @tg is valid.  Let's
1303          * apply the new config directly.
1304          *
1305          * Restart the slices for both READ and WRITE. It might happen
1306          * that a group's limits are dropped suddenly and we don't want to
1307          * account recently dispatched IO against the new, lower rate.
1308          */
1309         throtl_start_new_slice(tg, READ);
1310         throtl_start_new_slice(tg, WRITE);
1311
1312         if (tg->flags & THROTL_TG_PENDING) {
1313                 tg_update_disptime(tg);
1314                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1315         }
1316 }
1317
1318 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1319                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1320 {
1321         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1322         struct blkg_conf_ctx ctx;
1323         struct throtl_grp *tg;
1324         int ret;
1325         u64 v;
1326
1327         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1328         if (ret)
1329                 return ret;
1330
1331         ret = -EINVAL;
1332         if (sscanf(ctx.body, "%llu", &v) != 1)
1333                 goto out_finish;
1334         if (!v)
1335                 v = U64_MAX;
1336
1337         tg = blkg_to_tg(ctx.blkg);
1338
1339         if (is_u64)
1340                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1341         else
1342                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1343
1344         tg_conf_updated(tg, false);
1345         ret = 0;
1346 out_finish:
1347         blkg_conf_finish(&ctx);
1348         return ret ?: nbytes;
1349 }
1350
1351 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1352                                char *buf, size_t nbytes, loff_t off)
1353 {
1354         return tg_set_conf(of, buf, nbytes, off, true);
1355 }
1356
1357 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1358                                 char *buf, size_t nbytes, loff_t off)
1359 {
1360         return tg_set_conf(of, buf, nbytes, off, false);
1361 }
1362
1363 static int tg_print_rwstat(struct seq_file *sf, void *v)
1364 {
1365         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1366                           blkg_prfill_rwstat, &blkcg_policy_throtl,
1367                           seq_cft(sf)->private, true);
1368         return 0;
1369 }
1370
1371 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1372                                       struct blkg_policy_data *pd, int off)
1373 {
1374         struct blkg_rwstat_sample sum;
1375
1376         blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1377                                   &sum);
1378         return __blkg_prfill_rwstat(sf, pd, &sum);
1379 }
1380
1381 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1382 {
1383         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1384                           tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1385                           seq_cft(sf)->private, true);
1386         return 0;
1387 }
1388
1389 static struct cftype throtl_legacy_files[] = {
1390         {
1391                 .name = "throttle.read_bps_device",
1392                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1393                 .seq_show = tg_print_conf_u64,
1394                 .write = tg_set_conf_u64,
1395         },
1396         {
1397                 .name = "throttle.write_bps_device",
1398                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1399                 .seq_show = tg_print_conf_u64,
1400                 .write = tg_set_conf_u64,
1401         },
1402         {
1403                 .name = "throttle.read_iops_device",
1404                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1405                 .seq_show = tg_print_conf_uint,
1406                 .write = tg_set_conf_uint,
1407         },
1408         {
1409                 .name = "throttle.write_iops_device",
1410                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1411                 .seq_show = tg_print_conf_uint,
1412                 .write = tg_set_conf_uint,
1413         },
1414         {
1415                 .name = "throttle.io_service_bytes",
1416                 .private = offsetof(struct throtl_grp, stat_bytes),
1417                 .seq_show = tg_print_rwstat,
1418         },
1419         {
1420                 .name = "throttle.io_service_bytes_recursive",
1421                 .private = offsetof(struct throtl_grp, stat_bytes),
1422                 .seq_show = tg_print_rwstat_recursive,
1423         },
1424         {
1425                 .name = "throttle.io_serviced",
1426                 .private = offsetof(struct throtl_grp, stat_ios),
1427                 .seq_show = tg_print_rwstat,
1428         },
1429         {
1430                 .name = "throttle.io_serviced_recursive",
1431                 .private = offsetof(struct throtl_grp, stat_ios),
1432                 .seq_show = tg_print_rwstat_recursive,
1433         },
1434         { }     /* terminate */
1435 };
1436
1437 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1438                          int off)
1439 {
1440         struct throtl_grp *tg = pd_to_tg(pd);
1441         const char *dname = blkg_dev_name(pd->blkg);
1442         char bufs[4][21] = { "max", "max", "max", "max" };
1443         u64 bps_dft;
1444         unsigned int iops_dft;
1445         char idle_time[26] = "";
1446         char latency_time[26] = "";
1447
1448         if (!dname)
1449                 return 0;
1450
1451         if (off == LIMIT_LOW) {
1452                 bps_dft = 0;
1453                 iops_dft = 0;
1454         } else {
1455                 bps_dft = U64_MAX;
1456                 iops_dft = UINT_MAX;
1457         }
1458
1459         if (tg->bps_conf[READ][off] == bps_dft &&
1460             tg->bps_conf[WRITE][off] == bps_dft &&
1461             tg->iops_conf[READ][off] == iops_dft &&
1462             tg->iops_conf[WRITE][off] == iops_dft &&
1463             (off != LIMIT_LOW ||
1464              (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1465               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1466                 return 0;
1467
1468         if (tg->bps_conf[READ][off] != U64_MAX)
1469                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1470                         tg->bps_conf[READ][off]);
1471         if (tg->bps_conf[WRITE][off] != U64_MAX)
1472                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1473                         tg->bps_conf[WRITE][off]);
1474         if (tg->iops_conf[READ][off] != UINT_MAX)
1475                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1476                         tg->iops_conf[READ][off]);
1477         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1478                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1479                         tg->iops_conf[WRITE][off]);
1480         if (off == LIMIT_LOW) {
1481                 if (tg->idletime_threshold_conf == ULONG_MAX)
1482                         strcpy(idle_time, " idle=max");
1483                 else
1484                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1485                                 tg->idletime_threshold_conf);
1486
1487                 if (tg->latency_target_conf == ULONG_MAX)
1488                         strcpy(latency_time, " latency=max");
1489                 else
1490                         snprintf(latency_time, sizeof(latency_time),
1491                                 " latency=%lu", tg->latency_target_conf);
1492         }
1493
1494         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1495                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1496                    latency_time);
1497         return 0;
1498 }
1499
1500 static int tg_print_limit(struct seq_file *sf, void *v)
1501 {
1502         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1503                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1504         return 0;
1505 }
1506
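/*
 * Parser for writes to io.max (and, with CONFIG_BLK_DEV_THROTTLING_LOW,
 * io.low).  The body is "<maj:min> key=value ...": keys not mentioned keep
 * their current value and "max" clears a limit.  For illustration only
 * (8:16 is a made-up device):
 *
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 */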
1507 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1508                           char *buf, size_t nbytes, loff_t off)
1509 {
1510         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1511         struct blkg_conf_ctx ctx;
1512         struct throtl_grp *tg;
1513         u64 v[4];
1514         unsigned long idle_time;
1515         unsigned long latency_time;
1516         int ret;
1517         int index = of_cft(of)->private;
1518
1519         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1520         if (ret)
1521                 return ret;
1522
1523         tg = blkg_to_tg(ctx.blkg);
1524
1525         v[0] = tg->bps_conf[READ][index];
1526         v[1] = tg->bps_conf[WRITE][index];
1527         v[2] = tg->iops_conf[READ][index];
1528         v[3] = tg->iops_conf[WRITE][index];
1529
1530         idle_time = tg->idletime_threshold_conf;
1531         latency_time = tg->latency_target_conf;
1532         while (true) {
1533                 char tok[27];   /* wiops=18446744073709551616 */
1534                 char *p;
1535                 u64 val = U64_MAX;
1536                 int len;
1537
1538                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1539                         break;
1540                 if (tok[0] == '\0')
1541                         break;
1542                 ctx.body += len;
1543
1544                 ret = -EINVAL;
1545                 p = tok;
1546                 strsep(&p, "=");
1547                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1548                         goto out_finish;
1549
1550                 ret = -ERANGE;
1551                 if (!val)
1552                         goto out_finish;
1553
1554                 ret = -EINVAL;
1555                 if (!strcmp(tok, "rbps") && val > 1)
1556                         v[0] = val;
1557                 else if (!strcmp(tok, "wbps") && val > 1)
1558                         v[1] = val;
1559                 else if (!strcmp(tok, "riops") && val > 1)
1560                         v[2] = min_t(u64, val, UINT_MAX);
1561                 else if (!strcmp(tok, "wiops") && val > 1)
1562                         v[3] = min_t(u64, val, UINT_MAX);
1563                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1564                         idle_time = val;
1565                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1566                         latency_time = val;
1567                 else
1568                         goto out_finish;
1569         }
1570
1571         tg->bps_conf[READ][index] = v[0];
1572         tg->bps_conf[WRITE][index] = v[1];
1573         tg->iops_conf[READ][index] = v[2];
1574         tg->iops_conf[WRITE][index] = v[3];
1575
1576         if (index == LIMIT_MAX) {
1577                 tg->bps[READ][index] = v[0];
1578                 tg->bps[WRITE][index] = v[1];
1579                 tg->iops[READ][index] = v[2];
1580                 tg->iops[WRITE][index] = v[3];
1581         }
1582         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1583                 tg->bps_conf[READ][LIMIT_MAX]);
1584         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1585                 tg->bps_conf[WRITE][LIMIT_MAX]);
1586         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1587                 tg->iops_conf[READ][LIMIT_MAX]);
1588         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1589                 tg->iops_conf[WRITE][LIMIT_MAX]);
1590         tg->idletime_threshold_conf = idle_time;
1591         tg->latency_target_conf = latency_time;
1592
1593         /* force the user to configure all settings for the low limit */
1594         if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1595               tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1596             tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1597             tg->latency_target_conf == DFL_LATENCY_TARGET) {
1598                 tg->bps[READ][LIMIT_LOW] = 0;
1599                 tg->bps[WRITE][LIMIT_LOW] = 0;
1600                 tg->iops[READ][LIMIT_LOW] = 0;
1601                 tg->iops[WRITE][LIMIT_LOW] = 0;
1602                 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1603                 tg->latency_target = DFL_LATENCY_TARGET;
1604         } else if (index == LIMIT_LOW) {
1605                 tg->idletime_threshold = tg->idletime_threshold_conf;
1606                 tg->latency_target = tg->latency_target_conf;
1607         }
1608
1609         blk_throtl_update_limit_valid(tg->td);
1610         if (tg->td->limit_valid[LIMIT_LOW]) {
1611                 if (index == LIMIT_LOW)
1612                         tg->td->limit_index = LIMIT_LOW;
1613         } else
1614                 tg->td->limit_index = LIMIT_MAX;
1615         tg_conf_updated(tg, index == LIMIT_LOW &&
1616                 tg->td->limit_valid[LIMIT_LOW]);
1617         ret = 0;
1618 out_finish:
1619         blkg_conf_finish(&ctx);
1620         return ret ?: nbytes;
1621 }
1622
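/*
 * On the unified hierarchy these files are exposed with the controller
 * prefix, i.e. as io.low (only with CONFIG_BLK_DEV_THROTTLING_LOW) and
 * io.max.
 */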
1623 static struct cftype throtl_files[] = {
1624 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1625         {
1626                 .name = "low",
1627                 .flags = CFTYPE_NOT_ON_ROOT,
1628                 .seq_show = tg_print_limit,
1629                 .write = tg_set_limit,
1630                 .private = LIMIT_LOW,
1631         },
1632 #endif
1633         {
1634                 .name = "max",
1635                 .flags = CFTYPE_NOT_ON_ROOT,
1636                 .seq_show = tg_print_limit,
1637                 .write = tg_set_limit,
1638                 .private = LIMIT_MAX,
1639         },
1640         { }     /* terminate */
1641 };
1642
1643 static void throtl_shutdown_wq(struct request_queue *q)
1644 {
1645         struct throtl_data *td = q->td;
1646
1647         cancel_work_sync(&td->dispatch_work);
1648 }
1649
1650 struct blkcg_policy blkcg_policy_throtl = {
1651         .dfl_cftypes            = throtl_files,
1652         .legacy_cftypes         = throtl_legacy_files,
1653
1654         .pd_alloc_fn            = throtl_pd_alloc,
1655         .pd_init_fn             = throtl_pd_init,
1656         .pd_online_fn           = throtl_pd_online,
1657         .pd_offline_fn          = throtl_pd_offline,
1658         .pd_free_fn             = throtl_pd_free,
1659 };
1660
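/*
 * Last time @tg ran above its low limit.  A direction with no low limit
 * defaults to "now" and thus never wins the min() below, so it is
 * effectively ignored unless neither direction has a low limit at all.
 */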
1661 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1662 {
1663         unsigned long rtime = jiffies, wtime = jiffies;
1664
1665         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1666                 rtime = tg->last_low_overflow_time[READ];
1667         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1668                 wtime = tg->last_low_overflow_time[WRITE];
1669         return min(rtime, wtime);
1670 }
1671
1672 /* tg should not be an intermediate node */
1673 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1674 {
1675         struct throtl_service_queue *parent_sq;
1676         struct throtl_grp *parent = tg;
1677         unsigned long ret = __tg_last_low_overflow_time(tg);
1678
1679         while (true) {
1680                 parent_sq = parent->service_queue.parent_sq;
1681                 parent = sq_to_tg(parent_sq);
1682                 if (!parent)
1683                         break;
1684
1685                 /*
1686                  * A parent without a low limit always reaches its low
1687                  * limit, so its overflow time is useless for its children.
1688                  */
1689                 if (!parent->bps[READ][LIMIT_LOW] &&
1690                     !parent->iops[READ][LIMIT_LOW] &&
1691                     !parent->bps[WRITE][LIMIT_LOW] &&
1692                     !parent->iops[WRITE][LIMIT_LOW])
1693                         continue;
1694                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1695                         ret = __tg_last_low_overflow_time(parent);
1696         }
1697         return ret;
1698 }
1699
1700 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1701 {
1702         /*
1703          * The cgroup is considered idle if any of the following holds:
1704          * - the current idle period is too long: longer than a fixed cap (in
1705          *   case the user configured too big a threshold) or 4x the idletime threshold
1706          * - the average think time is above the threshold
1707          * - IO latency is largely below the target
1708          */
1709         unsigned long time;
1710         bool ret;
1711
1712         time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1713         ret = tg->latency_target == DFL_LATENCY_TARGET ||
1714               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1715               (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1716               tg->avg_idletime > tg->idletime_threshold ||
1717               (tg->latency_target && tg->bio_cnt &&
1718                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1719         throtl_log(&tg->service_queue,
1720                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1721                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1722                 tg->bio_cnt, ret, tg->td->scale);
1723         return ret;
1724 }
1725
1726 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1727 {
1728         struct throtl_service_queue *sq = &tg->service_queue;
1729         bool read_limit, write_limit;
1730
1731         /*
1732          * If the cgroup has reached its low limit (a low limit of 0 is always
1733          * considered reached), it's ok to upgrade to the next limit.
1734          */
1735         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1736         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1737         if (!read_limit && !write_limit)
1738                 return true;
1739         if (read_limit && sq->nr_queued[READ] &&
1740             (!write_limit || sq->nr_queued[WRITE]))
1741                 return true;
1742         if (write_limit && sq->nr_queued[WRITE] &&
1743             (!read_limit || sq->nr_queued[READ]))
1744                 return true;
1745
1746         if (time_after_eq(jiffies,
1747                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1748             throtl_tg_is_idle(tg))
1749                 return true;
1750         return false;
1751 }
1752
1753 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1754 {
1755         while (true) {
1756                 if (throtl_tg_can_upgrade(tg))
1757                         return true;
1758                 tg = sq_to_tg(tg->service_queue.parent_sq);
1759                 if (!tg || !tg_to_blkg(tg)->parent)
1760                         return false;
1761         }
1762         return false;
1763 }
1764
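/*
 * Upgrading from LIMIT_LOW back to LIMIT_MAX is a queue-wide decision:
 * roughly, every remaining leaf cgroup (other than @this_tg) must qualify,
 * i.e. it has reached its low limit or has been idle for a while, and at
 * least one throtl_slice must have passed since the last downgrade.
 */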
1765 static bool throtl_can_upgrade(struct throtl_data *td,
1766         struct throtl_grp *this_tg)
1767 {
1768         struct cgroup_subsys_state *pos_css;
1769         struct blkcg_gq *blkg;
1770
1771         if (td->limit_index != LIMIT_LOW)
1772                 return false;
1773
1774         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1775                 return false;
1776
1777         rcu_read_lock();
1778         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1779                 struct throtl_grp *tg = blkg_to_tg(blkg);
1780
1781                 if (tg == this_tg)
1782                         continue;
1783                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1784                         continue;
1785                 if (!throtl_hierarchy_can_upgrade(tg)) {
1786                         rcu_read_unlock();
1787                         return false;
1788                 }
1789         }
1790         rcu_read_unlock();
1791         return true;
1792 }
1793
1794 static void throtl_upgrade_check(struct throtl_grp *tg)
1795 {
1796         unsigned long now = jiffies;
1797
1798         if (tg->td->limit_index != LIMIT_LOW)
1799                 return;
1800
1801         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1802                 return;
1803
1804         tg->last_check_time = now;
1805
1806         if (!time_after_eq(now,
1807              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1808                 return;
1809
1810         if (throtl_can_upgrade(tg->td, NULL))
1811                 throtl_upgrade_state(tg->td);
1812 }
1813
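/*
 * Switch the whole queue to LIMIT_MAX: reset the scale, mark every group
 * ready to dispatch immediately and kick the dispatch work so that bios
 * queued under the low limit get re-evaluated against the max limit.
 */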
1814 static void throtl_upgrade_state(struct throtl_data *td)
1815 {
1816         struct cgroup_subsys_state *pos_css;
1817         struct blkcg_gq *blkg;
1818
1819         throtl_log(&td->service_queue, "upgrade to max");
1820         td->limit_index = LIMIT_MAX;
1821         td->low_upgrade_time = jiffies;
1822         td->scale = 0;
1823         rcu_read_lock();
1824         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1825                 struct throtl_grp *tg = blkg_to_tg(blkg);
1826                 struct throtl_service_queue *sq = &tg->service_queue;
1827
1828                 tg->disptime = jiffies - 1;
1829                 throtl_select_dispatch(sq);
1830                 throtl_schedule_next_dispatch(sq, true);
1831         }
1832         rcu_read_unlock();
1833         throtl_select_dispatch(&td->service_queue);
1834         throtl_schedule_next_dispatch(&td->service_queue, true);
1835         queue_work(kthrotld_workqueue, &td->dispatch_work);
1836 }
1837
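/*
 * Downgrade is gradual: each call halves td->scale and, while the scale is
 * still non-zero, only pulls low_upgrade_time back instead of switching
 * limits.  Only once the scale reaches zero does the queue actually drop
 * back to LIMIT_LOW.
 */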
1838 static void throtl_downgrade_state(struct throtl_data *td)
1839 {
1840         td->scale /= 2;
1841
1842         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1843         if (td->scale) {
1844                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1845                 return;
1846         }
1847
1848         td->limit_index = LIMIT_LOW;
1849         td->low_downgrade_time = jiffies;
1850 }
1851
1852 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1853 {
1854         struct throtl_data *td = tg->td;
1855         unsigned long now = jiffies;
1856
1857         /*
1858          * If the cgroup is below its low limit, consider downgrading so that
1859          * other cgroups get throttled.
1860          */
1861         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1862             time_after_eq(now, tg_last_low_overflow_time(tg) +
1863                                         td->throtl_slice) &&
1864             (!throtl_tg_is_idle(tg) ||
1865              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1866                 return true;
1867         return false;
1868 }
1869
1870 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1871 {
1872         while (true) {
1873                 if (!throtl_tg_can_downgrade(tg))
1874                         return false;
1875                 tg = sq_to_tg(tg->service_queue.parent_sq);
1876                 if (!tg || !tg_to_blkg(tg)->parent)
1877                         break;
1878         }
1879         return true;
1880 }
1881
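/*
 * Compare the rates actually seen since the last check against the low
 * limits, e.g. bps = bytes_dispatched * HZ / elapsed_jiffies.  For
 * illustration (made-up numbers, HZ=250): 4 MiB dispatched over 50 jiffies
 * is 4 MiB * 250 / 50 = 20 MiB/s.  A direction that kept up with its low
 * limit gets its overflow timestamp refreshed, which blocks a downgrade.
 */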
1882 static void throtl_downgrade_check(struct throtl_grp *tg)
1883 {
1884         uint64_t bps;
1885         unsigned int iops;
1886         unsigned long elapsed_time;
1887         unsigned long now = jiffies;
1888
1889         if (tg->td->limit_index != LIMIT_MAX ||
1890             !tg->td->limit_valid[LIMIT_LOW])
1891                 return;
1892         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1893                 return;
1894         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1895                 return;
1896
1897         elapsed_time = now - tg->last_check_time;
1898         tg->last_check_time = now;
1899
1900         if (time_before(now, tg_last_low_overflow_time(tg) +
1901                         tg->td->throtl_slice))
1902                 return;
1903
1904         if (tg->bps[READ][LIMIT_LOW]) {
1905                 bps = tg->last_bytes_disp[READ] * HZ;
1906                 do_div(bps, elapsed_time);
1907                 if (bps >= tg->bps[READ][LIMIT_LOW])
1908                         tg->last_low_overflow_time[READ] = now;
1909         }
1910
1911         if (tg->bps[WRITE][LIMIT_LOW]) {
1912                 bps = tg->last_bytes_disp[WRITE] * HZ;
1913                 do_div(bps, elapsed_time);
1914                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
1915                         tg->last_low_overflow_time[WRITE] = now;
1916         }
1917
1918         if (tg->iops[READ][LIMIT_LOW]) {
1919                 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
1920                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1921                 if (iops >= tg->iops[READ][LIMIT_LOW])
1922                         tg->last_low_overflow_time[READ] = now;
1923         }
1924
1925         if (tg->iops[WRITE][LIMIT_LOW]) {
1926                 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
1927                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1928                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
1929                         tg->last_low_overflow_time[WRITE] = now;
1930         }
1931
1932         /*
1933          * If the cgroup is below its low limit, consider downgrading so that
1934          * other cgroups get throttled.
1935          */
1936         if (throtl_hierarchy_can_downgrade(tg))
1937                 throtl_downgrade_state(tg->td);
1938
1939         tg->last_bytes_disp[READ] = 0;
1940         tg->last_bytes_disp[WRITE] = 0;
1941         tg->last_io_disp[READ] = 0;
1942         tg->last_io_disp[WRITE] = 0;
1943 }
1944
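/*
 * Track the group's think time as an exponentially weighted moving average:
 * avg_idletime = (7 * avg_idletime + (now - last_finish_time)) / 8, with
 * both timestamps in roughly microseconds (ktime_get_ns() >> 10).
 */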
1945 static void blk_throtl_update_idletime(struct throtl_grp *tg)
1946 {
1947         unsigned long now;
1948         unsigned long last_finish_time = tg->last_finish_time;
1949
1950         if (last_finish_time == 0)
1951                 return;
1952
1953         now = ktime_get_ns() >> 10;
1954         if (now <= last_finish_time ||
1955             last_finish_time == tg->checked_last_finish_time)
1956                 return;
1957
1958         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
1959         tg->checked_last_finish_time = last_finish_time;
1960 }
1961
1962 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
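/*
 * Once a second, fold the per-cpu latency samples into per-request-size
 * averages: buckets with at least 32 new samples update the running average
 * with the same 7/8 weighting used for think time, and the result is forced
 * to be non-decreasing as the request size grows.
 */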
1963 static void throtl_update_latency_buckets(struct throtl_data *td)
1964 {
1965         struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
1966         int i, cpu, rw;
1967         unsigned long last_latency[2] = { 0 };
1968         unsigned long latency[2];
1969
1970         if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
1971                 return;
1972         if (time_before(jiffies, td->last_calculate_time + HZ))
1973                 return;
1974         td->last_calculate_time = jiffies;
1975
1976         memset(avg_latency, 0, sizeof(avg_latency));
1977         for (rw = READ; rw <= WRITE; rw++) {
1978                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
1979                         struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
1980
1981                         for_each_possible_cpu(cpu) {
1982                                 struct latency_bucket *bucket;
1983
1984                                 /* this isn't race free, but ok in practice */
1985                                 bucket = per_cpu_ptr(td->latency_buckets[rw],
1986                                         cpu);
1987                                 tmp->total_latency += bucket[i].total_latency;
1988                                 tmp->samples += bucket[i].samples;
1989                                 bucket[i].total_latency = 0;
1990                                 bucket[i].samples = 0;
1991                         }
1992
1993                         if (tmp->samples >= 32) {
1994                                 int samples = tmp->samples;
1995
1996                                 latency[rw] = tmp->total_latency;
1997
1998                                 tmp->total_latency = 0;
1999                                 tmp->samples = 0;
2000                                 latency[rw] /= samples;
2001                                 if (latency[rw] == 0)
2002                                         continue;
2003                                 avg_latency[rw][i].latency = latency[rw];
2004                         }
2005                 }
2006         }
2007
2008         for (rw = READ; rw <= WRITE; rw++) {
2009                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2010                         if (!avg_latency[rw][i].latency) {
2011                                 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2012                                         td->avg_buckets[rw][i].latency =
2013                                                 last_latency[rw];
2014                                 continue;
2015                         }
2016
2017                         if (!td->avg_buckets[rw][i].valid)
2018                                 latency[rw] = avg_latency[rw][i].latency;
2019                         else
2020                                 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2021                                         avg_latency[rw][i].latency) >> 3;
2022
2023                         td->avg_buckets[rw][i].latency = max(latency[rw],
2024                                 last_latency[rw]);
2025                         td->avg_buckets[rw][i].valid = true;
2026                         last_latency[rw] = td->avg_buckets[rw][i].latency;
2027                 }
2028         }
2029
2030         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2031                 throtl_log(&td->service_queue,
2032                         "Latency bucket %d: read latency=%ld, read valid=%d, "
2033                         "write latency=%ld, write valid=%d", i,
2034                         td->avg_buckets[READ][i].latency,
2035                         td->avg_buckets[READ][i].valid,
2036                         td->avg_buckets[WRITE][i].latency,
2037                         td->avg_buckets[WRITE][i].valid);
2038 }
2039 #else
2040 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2041 {
2042 }
2043 #endif
2044
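/*
 * A split bio turns into multiple requests, so walk up from the bio's group,
 * counting one extra IO per level, and stop at the first ancestor without
 * throttling rules for this direction; the extra counts are later folded
 * into the iops accounting.
 */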
2045 void blk_throtl_charge_bio_split(struct bio *bio)
2046 {
2047         struct blkcg_gq *blkg = bio->bi_blkg;
2048         struct throtl_grp *parent = blkg_to_tg(blkg);
2049         struct throtl_service_queue *parent_sq;
2050         bool rw = bio_data_dir(bio);
2051
2052         do {
2053                 if (!parent->has_rules[rw])
2054                         break;
2055
2056                 atomic_inc(&parent->io_split_cnt[rw]);
2057                 atomic_inc(&parent->last_io_split_cnt[rw]);
2058
2059                 parent_sq = parent->service_queue.parent_sq;
2060                 parent = sq_to_tg(parent_sq);
2061         } while (parent);
2062 }
2063
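/*
 * Out-of-line entry for bios that have throttling rules.  Starting at the
 * bio's own group, charge and climb one level at a time while the bio fits
 * within the limits; the first level that is over its limit (or already has
 * bios queued for this direction) queues the bio there and schedules a
 * dispatch.  Only a bio that makes it past the root is submitted directly.
 */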
2064 bool __blk_throtl_bio(struct bio *bio)
2065 {
2066         struct request_queue *q = bio->bi_bdev->bd_disk->queue;
2067         struct blkcg_gq *blkg = bio->bi_blkg;
2068         struct throtl_qnode *qn = NULL;
2069         struct throtl_grp *tg = blkg_to_tg(blkg);
2070         struct throtl_service_queue *sq;
2071         bool rw = bio_data_dir(bio);
2072         bool throttled = false;
2073         struct throtl_data *td = tg->td;
2074
2075         rcu_read_lock();
2076
2077         if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2078                 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2079                                 bio->bi_iter.bi_size);
2080                 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2081         }
2082
2083         spin_lock_irq(&q->queue_lock);
2084
2085         throtl_update_latency_buckets(td);
2086
2087         blk_throtl_update_idletime(tg);
2088
2089         sq = &tg->service_queue;
2090
2091 again:
2092         while (true) {
2093                 if (tg->last_low_overflow_time[rw] == 0)
2094                         tg->last_low_overflow_time[rw] = jiffies;
2095                 throtl_downgrade_check(tg);
2096                 throtl_upgrade_check(tg);
2097                 /* throtl is FIFO - if bios are already queued, should queue */
2098                 if (sq->nr_queued[rw])
2099                         break;
2100
2101                 /* if above limits, break to queue */
2102                 if (!tg_may_dispatch(tg, bio, NULL)) {
2103                         tg->last_low_overflow_time[rw] = jiffies;
2104                         if (throtl_can_upgrade(td, tg)) {
2105                                 throtl_upgrade_state(td);
2106                                 goto again;
2107                         }
2108                         break;
2109                 }
2110
2111                 /* within limits, let's charge and dispatch directly */
2112                 throtl_charge_bio(tg, bio);
2113
2114                 /*
2115                  * We need to trim slice even when bios are not being queued
2116                  * otherwise it might happen that a bio is not queued for
2117                  * a long time and slice keeps on extending and trim is not
2118                  * called for a long time. Now if limits are reduced suddenly
2119                  * we take into account all the IO dispatched so far at new
2120                  * low rate and newly queued IO gets a really long dispatch
2121                  * time.
2122                  *
2123                  * So keep on trimming slice even if bio is not queued.
2124                  */
2125                 throtl_trim_slice(tg, rw);
2126
2127                 /*
2128                  * @bio passed through this layer without being throttled.
2129                  * Climb up the ladder.  If we're already at the top, it
2130                  * can be executed directly.
2131                  */
2132                 qn = &tg->qnode_on_parent[rw];
2133                 sq = sq->parent_sq;
2134                 tg = sq_to_tg(sq);
2135                 if (!tg)
2136                         goto out_unlock;
2137         }
2138
2139         /* out-of-limit, queue to @tg */
2140         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2141                    rw == READ ? 'R' : 'W',
2142                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2143                    tg_bps_limit(tg, rw),
2144                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2145                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2146
2147         tg->last_low_overflow_time[rw] = jiffies;
2148
2149         td->nr_queued[rw]++;
2150         throtl_add_bio_tg(bio, qn, tg);
2151         throttled = true;
2152
2153         /*
2154          * Update @tg's dispatch time and force schedule dispatch if @tg
2155          * was empty before @bio.  The forced scheduling isn't likely to
2156          * cause undue delay as @bio is likely to be dispatched directly if
2157          * its @tg's disptime is not in the future.
2158          */
2159         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2160                 tg_update_disptime(tg);
2161                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2162         }
2163
2164 out_unlock:
2165         spin_unlock_irq(&q->queue_lock);
2166         bio_set_flag(bio, BIO_THROTTLED);
2167
2168 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2169         if (throttled || !td->track_bio_latency)
2170                 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2171 #endif
2172         rcu_read_unlock();
2173         return throttled;
2174 }
2175
2176 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
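/*
 * Per-cpu latency accounting, done only for reads and writes on
 * non-rotational queues while the queue is running at LIMIT_LOW; samples
 * are binned by request size via request_bucket_index().
 */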
2177 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2178         int op, unsigned long time)
2179 {
2180         struct latency_bucket *latency;
2181         int index;
2182
2183         if (!td || td->limit_index != LIMIT_LOW ||
2184             !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2185             !blk_queue_nonrot(td->queue))
2186                 return;
2187
2188         index = request_bucket_index(size);
2189
2190         latency = get_cpu_ptr(td->latency_buckets[op]);
2191         latency[index].total_latency += time;
2192         latency[index].samples++;
2193         put_cpu_ptr(td->latency_buckets[op]);
2194 }
2195
2196 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2197 {
2198         struct request_queue *q = rq->q;
2199         struct throtl_data *td = q->td;
2200
2201         throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2202                              time_ns >> 10);
2203 }
2204
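/*
 * Completion hook: remember the finish time for think-time tracking, feed
 * the bio's latency (issue to completion, in ~usecs) into the buckets, and
 * count a "bad" bio whenever the latency exceeds the bucket's average plus
 * the group's latency target.  The good/bad counters are periodically
 * halved so old history fades out.
 */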
2205 void blk_throtl_bio_endio(struct bio *bio)
2206 {
2207         struct blkcg_gq *blkg;
2208         struct throtl_grp *tg;
2209         u64 finish_time_ns;
2210         unsigned long finish_time;
2211         unsigned long start_time;
2212         unsigned long lat;
2213         int rw = bio_data_dir(bio);
2214
2215         blkg = bio->bi_blkg;
2216         if (!blkg)
2217                 return;
2218         tg = blkg_to_tg(blkg);
2219         if (!tg->td->limit_valid[LIMIT_LOW])
2220                 return;
2221
2222         finish_time_ns = ktime_get_ns();
2223         tg->last_finish_time = finish_time_ns >> 10;
2224
2225         start_time = bio_issue_time(&bio->bi_issue) >> 10;
2226         finish_time = __bio_issue_time(finish_time_ns) >> 10;
2227         if (!start_time || finish_time <= start_time)
2228                 return;
2229
2230         lat = finish_time - start_time;
2231         /* this is only for bio based driver */
2232         if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2233                 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2234                                      bio_op(bio), lat);
2235
2236         if (tg->latency_target && lat >= tg->td->filtered_latency) {
2237                 int bucket;
2238                 unsigned int threshold;
2239
2240                 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2241                 threshold = tg->td->avg_buckets[rw][bucket].latency +
2242                         tg->latency_target;
2243                 if (lat > threshold)
2244                         tg->bad_bio_cnt++;
2245                 /*
2246                  * Not race free, so the count may be wrong; the consequence is
2247                  * only that cgroups get throttled.
2248                  */
2249                 tg->bio_cnt++;
2250         }
2251
2252         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2253                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2254                 tg->bio_cnt /= 2;
2255                 tg->bad_bio_cnt /= 2;
2256         }
2257 }
2258 #endif
2259
2260 int blk_throtl_init(struct request_queue *q)
2261 {
2262         struct throtl_data *td;
2263         int ret;
2264
2265         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2266         if (!td)
2267                 return -ENOMEM;
2268         td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2269                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2270         if (!td->latency_buckets[READ]) {
2271                 kfree(td);
2272                 return -ENOMEM;
2273         }
2274         td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2275                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2276         if (!td->latency_buckets[WRITE]) {
2277                 free_percpu(td->latency_buckets[READ]);
2278                 kfree(td);
2279                 return -ENOMEM;
2280         }
2281
2282         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2283         throtl_service_queue_init(&td->service_queue);
2284
2285         q->td = td;
2286         td->queue = q;
2287
2288         td->limit_valid[LIMIT_MAX] = true;
2289         td->limit_index = LIMIT_MAX;
2290         td->low_upgrade_time = jiffies;
2291         td->low_downgrade_time = jiffies;
2292
2293         /* activate policy */
2294         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2295         if (ret) {
2296                 free_percpu(td->latency_buckets[READ]);
2297                 free_percpu(td->latency_buckets[WRITE]);
2298                 kfree(td);
2299         }
2300         return ret;
2301 }
2302
2303 void blk_throtl_exit(struct request_queue *q)
2304 {
2305         BUG_ON(!q->td);
2306         del_timer_sync(&q->td->service_queue.pending_timer);
2307         throtl_shutdown_wq(q);
2308         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2309         free_percpu(q->td->latency_buckets[READ]);
2310         free_percpu(q->td->latency_buckets[WRITE]);
2311         kfree(q->td);
2312 }
2313
2314 void blk_throtl_register_queue(struct request_queue *q)
2315 {
2316         struct throtl_data *td;
2317         int i;
2318
2319         td = q->td;
2320         BUG_ON(!td);
2321
2322         if (blk_queue_nonrot(q)) {
2323                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2324                 td->filtered_latency = LATENCY_FILTERED_SSD;
2325         } else {
2326                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2327                 td->filtered_latency = LATENCY_FILTERED_HD;
2328                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2329                         td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2330                         td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2331                 }
2332         }
2333 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2334         /* if no low limit, use previous default */
2335         td->throtl_slice = DFL_THROTL_SLICE_HD;
2336 #endif
2337
2338         td->track_bio_latency = !queue_is_mq(q);
2339         if (!td->track_bio_latency)
2340                 blk_stat_enable_accounting(q);
2341 }
2342
2343 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
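/*
 * sysfs knob for the throttling slice, in milliseconds.  Presumably exposed
 * as /sys/block/<disk>/queue/throttle_sample_time, e.g.:
 *
 *   echo 20 > /sys/block/sda/queue/throttle_sample_time
 *
 * (path and value shown for illustration only).
 */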
2344 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2345 {
2346         if (!q->td)
2347                 return -EINVAL;
2348         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2349 }
2350
2351 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2352         const char *page, size_t count)
2353 {
2354         unsigned long v;
2355         unsigned long t;
2356
2357         if (!q->td)
2358                 return -EINVAL;
2359         if (kstrtoul(page, 10, &v))
2360                 return -EINVAL;
2361         t = msecs_to_jiffies(v);
2362         if (t == 0 || t > MAX_THROTL_SLICE)
2363                 return -EINVAL;
2364         q->td->throtl_slice = t;
2365         return count;
2366 }
2367 #endif
2368
2369 static int __init throtl_init(void)
2370 {
2371         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2372         if (!kthrotld_workqueue)
2373                 panic("Failed to create kthrotld\n");
2374
2375         return blkcg_policy_register(&blkcg_policy_throtl);
2376 }
2377
2378 module_init(throtl_init);