// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
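
/*
 * Example (illustrative, assuming HZ == 1000): the default slice is
 * HZ / 10 == 100 jiffies (100ms) for rotational disks and HZ / 50 ==
 * 20 jiffies (20ms) for SSDs, and a slice is never extended beyond
 * MAX_THROTL_SLICE == HZ (one second).
 */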

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)

struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        unsigned int throtl_slice;

        /* Work for dispatching throttled bios */
        struct work_struct dispatch_work;

        bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        else
                return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        if (tg)
                return tg->td;
        else
                return container_of(sq, struct throtl_data, service_queue);
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return U64_MAX;

        return tg->bps[rw];
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;

        return tg->iops[rw];
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)    do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        (void)__td;                                                     \
        if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
                break;                                                  \
        if ((__tg)) {                                                   \
                blk_add_cgroup_trace_msg(__td->queue,                   \
                        &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
        } else {                                                        \
                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
        }                                                               \
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
        /* assume it's one sector */
        if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                return 512;
        return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
        INIT_LIST_HEAD(&qn->node);
        bio_list_init(&qn->bios);
        qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct list_head *queued)
{
        bio_list_add(&qn->bios, bio);
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, queued);
                blkg_get(tg_to_blkg(qn->tg));
        }
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
        struct throtl_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        qn = list_first_entry(queued, struct throtl_qnode, node);
        bio = bio_list_peek(&qn->bios);
        WARN_ON_ONCE(!bio);
        return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
                                     struct throtl_grp **tg_to_put)
{
        struct throtl_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        qn = list_first_entry(queued, struct throtl_qnode, node);
        bio = bio_list_pop(&qn->bios);
        WARN_ON_ONCE(!bio);

        if (bio_list_empty(&qn->bios)) {
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                list_move_tail(&qn->node, queued);
        }

        return bio;
}
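
/*
 * Example of the resulting round-robin order (illustrative): if @queued
 * holds qnode A with bios a1, a2 and qnode B with bio b1, successive
 * calls to throtl_pop_queued() return a1 (A moves to the tail), then b1
 * (B becomes empty and is removed), then a2.  Bios from different groups
 * are thus interleaved instead of one group draining completely first.
 */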

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
        INIT_LIST_HEAD(&sq->queued[READ]);
        INIT_LIST_HEAD(&sq->queued[WRITE]);
        sq->pending_tree = RB_ROOT_CACHED;
        timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
                struct blkcg *blkcg, gfp_t gfp)
{
        struct throtl_grp *tg;
        int rw;

        tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
        if (!tg)
                return NULL;

        if (blkg_rwstat_init(&tg->stat_bytes, gfp))
                goto err_free_tg;

        if (blkg_rwstat_init(&tg->stat_ios, gfp))
                goto err_exit_stat_bytes;

        throtl_service_queue_init(&tg->service_queue);

        for (rw = READ; rw <= WRITE; rw++) {
                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
        }

        RB_CLEAR_NODE(&tg->rb_node);
        tg->bps[READ] = U64_MAX;
        tg->bps[WRITE] = U64_MAX;
        tg->iops[READ] = UINT_MAX;
        tg->iops[WRITE] = UINT_MAX;

        return &tg->pd;

err_exit_stat_bytes:
        blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
        kfree(tg);
        return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td = blkg->q->td;
        struct throtl_service_queue *sq = &tg->service_queue;

        /*
         * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself.  e.g. if a 16M
         * read_bps limit is set on a parent group, the total bps of the
         * parent group and its subtree groups can't exceed 16M for the
         * device.
         *
         * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
}
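
/*
 * For example, on the default hierarchy (cgroup v2) the 16M read limit
 * above could be set on a parent cgroup with something like (device
 * numbers illustrative):
 *
 *   # echo "8:0 rbps=16777216" > /sys/fs/cgroup/parent/io.max
 *
 * and every descendant of "parent" then shares that 16M budget.
 */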

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
        int rw;

        for (rw = READ; rw <= WRITE; rw++) {
                tg->has_rules_iops[rw] =
                        (parent_tg && parent_tg->has_rules_iops[rw]) ||
                        tg_iops_limit(tg, rw) != UINT_MAX;
                tg->has_rules_bps[rw] =
                        (parent_tg && parent_tg->has_rules_bps[rw]) ||
                        tg_bps_limit(tg, rw) != U64_MAX;
        }
}
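
/*
 * For instance, if a parent group has a read bps limit configured, its
 * has_rules_bps[READ] is true, so every child computed here inherits
 * true as well without re-walking the hierarchy up to the root.
 */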

static void throtl_pd_online(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        /*
         * We don't want new groups to escape the limits of their ancestors.
         * Update has_rules[] after a new group is brought online.
         */
        tg_update_has_rules(tg);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        del_timer_sync(&tg->service_queue.pending_timer);
        blkg_rwstat_exit(&tg->stat_bytes);
        blkg_rwstat_exit(&tg->stat_ios);
        kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
        struct rb_node *n;

        n = rb_first_cached(&parent_sq->pending_tree);
        WARN_ON_ONCE(!n);
        if (!n)
                return NULL;
        return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
                            struct throtl_service_queue *parent_sq)
{
        rb_erase_cached(n, &parent_sq->pending_tree);
        RB_CLEAR_NODE(n);
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(parent_sq);
        if (!tg)
                return;

        parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
        struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        bool leftmost = true;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
                               leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING)) {
                tg_service_queue_add(tg);
                tg->flags |= THROTL_TG_PENDING;
                tg->service_queue.parent_sq->nr_pending++;
        }
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING) {
                struct throtl_service_queue *parent_sq =
                        tg->service_queue.parent_sq;

                throtl_rb_erase(&tg->rb_node, parent_sq);
                --parent_sq->nr_pending;
                tg->flags &= ~THROTL_TG_PENDING;
        }
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
{
        unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

        /*
         * Since we adjust the throttle limit dynamically, the sleep time
         * calculated according to a previous limit might be invalid.  It's
         * possible that a cgroup's sleep time is very long while no other
         * cgroup has IO running, so a limit change would go unnoticed.
         * Make sure the cgroup doesn't sleep too long so that the
         * notification isn't missed.
         */
        if (time_after(expires, max_expire))
                expires = max_expire;
        mod_timer(&sq->pending_timer, expires);
        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
                   expires - jiffies, jiffies);
}
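
/*
 * Worked example (illustrative): with the rotational default of
 * throtl_slice == HZ / 10, max_expire caps the timer at 8 * 100ms ==
 * 800ms from now, so a stale (too long) sleep computed under an old
 * limit is re-evaluated within at most 800ms.
 */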

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
                                          bool force)
{
        /* any pending children left? */
        if (!sq->nr_pending)
                return true;

        update_min_dispatch_time(sq);

        /* is the next dispatch time in the future? */
        if (force || time_after(sq->first_pending_disptime, jiffies)) {
                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
                return true;
        }

        /* tell the caller to continue dispatching */
        return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
                bool rw, unsigned long start)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->carryover_bytes[rw] = 0;
        tg->carryover_ios[rw] = 0;

        /*
         * The previous slice has expired.  We must have trimmed it after the
         * last bio dispatch, which means the bandwidth since the start of
         * the last slice was never used.  Try to make use of that bandwidth
         * by giving credit.
         */
        if (time_after(start, tg->slice_start[rw]))
                tg->slice_start[rw] = start;

        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
                                          bool clear_carryover)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        if (clear_carryover) {
                tg->carryover_bytes[rw] = 0;
                tg->carryover_ios[rw] = 0;
        }

        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        throtl_set_slice_end(tg, rw, jiffy_end);
        throtl_log(&tg->service_queue,
                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}

static unsigned int calculate_io_allowed(u32 iops_limit,
                                         unsigned long jiffy_elapsed)
{
        unsigned int io_allowed;
        u64 tmp;

        /*
         * jiffy_elapsed should not be a big value: the minimum iops limit
         * is 1, so at most the equivalent of one second may elapse before
         * a dispatch is allowed, after which the slice should have been
         * trimmed.
         */

        tmp = (u64)iops_limit * jiffy_elapsed;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        return io_allowed;
}
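
/*
 * Worked example (illustrative): with iops_limit == 100 and
 * jiffy_elapsed == HZ / 2 (half a second), tmp == 100 * (HZ / 2) / HZ
 * == 50, i.e. 50 ios are allowed in the elapsed window.
 */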

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
        /*
         * Can result be wider than 64 bits?
         * We check against 62, not 64, due to ilog2 truncation.
         */
        if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
                return U64_MAX;
        return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
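
/*
 * The 62-bit check above works because ilog2() rounds down: x <
 * 2^(ilog2(x) + 1), so bps_limit * jiffy_elapsed / HZ <
 * 2^(ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) + 2).
 * Keeping that exponent sum at or below 62 guarantees the quotient
 * fits in 64 bits.
 */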

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long time_elapsed;
        long long bytes_trim;
        int io_trim;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps is unlimited (-1), then the time slice doesn't get
         * renewed.  Don't try to trim the slice if it has already expired;
         * a new slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched.  Also adjust slice_end.  It might
         * happen that initially the cgroup limit was very low, resulting in
         * a high slice_end, but later the limit was bumped up and the bio
         * was dispatched sooner; then we need to reduce slice_end.  A high
         * bogus slice_end is bad because it does not allow a new slice to
         * start.
         */

        throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

        time_elapsed = rounddown(jiffies - tg->slice_start[rw],
                                 tg->td->throtl_slice);
        if (!time_elapsed)
                return;

        bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
                                             time_elapsed) +
                     tg->carryover_bytes[rw];
        io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
                  tg->carryover_ios[rw];
        if (bytes_trim <= 0 && io_trim <= 0)
                return;

        tg->carryover_bytes[rw] = 0;
        if ((long long)tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        tg->carryover_ios[rw] = 0;
        if ((int)tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += time_elapsed;

        throtl_log(&tg->service_queue,
                   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
                   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
                   jiffies);
}
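
/*
 * Worked example (illustrative): with a bps limit of 1000000 bytes/s,
 * throtl_slice == 100ms and 300ms elapsed since slice_start,
 * time_elapsed rounds down to 300ms and bytes_trim is roughly 300000
 * bytes (plus any carryover); that much is forgiven from bytes_disp
 * and slice_start advances by 300ms.
 */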

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
        unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
        u64 bps_limit = tg_bps_limit(tg, rw);
        u32 iops_limit = tg_iops_limit(tg, rw);

        /*
         * If the config is updated while bios are still throttled,
         * calculate and accumulate how many bytes/ios have been waited for
         * across the change.  carryover_bytes/ios will then be used to
         * calculate the wait time under the new configuration.
         */
        if (bps_limit != U64_MAX)
                tg->carryover_bytes[rw] +=
                        calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
                        tg->bytes_disp[rw];
        if (iops_limit != UINT_MAX)
                tg->carryover_ios[rw] +=
                        calculate_io_allowed(iops_limit, jiffy_elapsed) -
                        tg->io_disp[rw];
}

static void tg_update_carryover(struct throtl_grp *tg)
{
        if (tg->service_queue.nr_queued[READ])
                __tg_update_carryover(tg, READ);
        if (tg->service_queue.nr_queued[WRITE])
                __tg_update_carryover(tg, WRITE);

        /* see comments in struct throtl_grp for meaning of these fields. */
        throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
                   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
                   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}
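
/*
 * Worked example (illustrative): a group limited to 1000000 bytes/s has
 * dispatched 100000 bytes when, 200ms into the slice, the limit is
 * changed.  calculate_bytes_allowed() yields ~200000 bytes, so
 * carryover_bytes grows by ~100000 bytes of unused budget which the
 * next wait-time calculation credits under the new limit.
 */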

static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                 u32 iops_limit)
{
        bool rw = bio_data_dir(bio);
        int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        if (iops_limit == UINT_MAX) {
                return 0;
        }

        jiffy_elapsed = jiffies - tg->slice_start[rw];

        /* Round up to the next throttle slice, wait time must be nonzero */
        jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
        io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
                     tg->carryover_ios[rw];
        if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
                return 0;

        /* Calc approx time to dispatch */
        jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

        /* make sure at least one io can be dispatched after waiting */
        jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
        return jiffy_wait;
}
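
/*
 * Worked example (illustrative, assuming HZ == 1000): with iops_limit
 * == 10 and no budget left in the current window, the final clamp makes
 * jiffy_wait at least HZ / 10 + 1 == 101 jiffies (~100ms), long enough
 * for exactly one more io to become allowed.
 */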

static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                u64 bps_limit)
{
        bool rw = bio_data_dir(bio);
        long long bytes_allowed;
        u64 extra_bytes;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        unsigned int bio_size = throtl_bio_data_size(bio);

        /* no need to throttle if this bio's bytes have been accounted */
        if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
                return 0;
        }

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
        bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
                        tg->carryover_bytes[rw];
        if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
                return 0;

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        return jiffy_wait;
}
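
/*
 * Worked example (illustrative): if the bio would overrun the budget by
 * extra_bytes == 524288 under a 1048576 bytes/s limit, jiffy_wait ==
 * 524288 * HZ / 1048576 == HZ / 2, i.e. about half a second before the
 * bio fits within the rate.
 */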

/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
        u64 bps_limit = tg_bps_limit(tg, rw);
        u32 iops_limit = tg_iops_limit(tg, rw);

        /*
         * Currently the whole state machine of the group depends on the
         * first bio queued in the group's bio list.  So one should not be
         * calling this function with a different bio if there are other
         * bios queued.
         */
        BUG_ON(tg->service_queue.nr_queued[rw] &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
            tg->flags & THROTL_TG_CANCELING) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If the previous slice expired, start a new one, otherwise
         * renew/extend the existing slice to make sure it is at least
         * throtl_slice interval long since now.  A new slice is started
         * only for an empty throttle group.  If there is a queued bio, that
         * means there should be an active slice and it should be extended
         * instead.
         */
        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw, true);
        else {
                if (time_before(tg->slice_end[rw],
                    jiffies + tg->td->throtl_slice))
                        throtl_extend_slice(tg, rw,
                                jiffies + tg->td->throtl_slice);
        }

        bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
        iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
        if (bps_wait + iops_wait == 0) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);
        unsigned int bio_size = throtl_bio_data_size(bio);

        /* Charge the bio to the group */
        if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
                tg->bytes_disp[rw] += bio_size;
                tg->last_bytes_disp[rw] += bio_size;
        }

        tg->io_disp[rw]++;
        tg->last_io_disp[rw]++;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                              struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool rw = bio_data_dir(bio);

        if (!qn)
                qn = &tg->qnode_on_self[rw];

        /*
         * If @tg doesn't currently have any bios queued in the same
         * direction, queueing @bio can change when @tg should be
         * dispatched.  Mark that @tg was empty.  This is automatically
         * cleared on the next tg_update_disptime().
         */
        if (!sq->nr_queued[rw])
                tg->flags |= THROTL_TG_WAS_EMPTY;

        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

        sq->nr_queued[rw]++;
        throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                tg_may_dispatch(tg, bio, &read_wait);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->disptime = disptime;
        tg_service_queue_add(tg);

        /* see throtl_add_bio_tg() */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
                                        struct throtl_grp *parent_tg, bool rw)
{
        if (throtl_slice_used(parent_tg, rw)) {
                throtl_start_new_slice_with_credit(parent_tg, rw,
                                child_tg->slice_start[rw]);
        }

}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct throtl_service_queue *parent_sq = sq->parent_sq;
        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
        struct throtl_grp *tg_to_put = NULL;
        struct bio *bio;

        /*
         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
         * from @tg may put its reference and @parent_sq might end up
         * getting released prematurely.  Remember the tg to put and put it
         * after @bio is transferred to @parent_sq.
         */
        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
        sq->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);

        /*
         * If our parent is another tg, we just need to transfer @bio to
         * the parent using throtl_add_bio_tg().  If our parent is
         * @td->service_queue, @bio is ready to be issued.  Put it on its
         * bio_lists[] and decrease total number queued.  The caller is
         * responsible for issuing these bios.
         */
        if (parent_tg) {
                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
                start_parent_slice_with_credit(tg, parent_tg, rw);
        } else {
                bio_set_flag(bio, BIO_BPS_THROTTLED);
                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                     &parent_sq->queued[rw]);
                BUG_ON(tg->td->nr_queued[rw] <= 0);
                tg->td->nr_queued[rw]--;
        }

        throtl_trim_slice(tg, rw);

        if (tg_to_put)
                blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
        unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
        struct bio *bio;

        /*
         * Try to dispatch 75% READs and 25% WRITEs: with
         * THROTL_GRP_QUANTUM == 8 that is up to 6 reads and 2 writes
         * per round.
         */

        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, READ);
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, WRITE);
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
        unsigned int nr_disp = 0;

        while (1) {
                struct throtl_grp *tg;
                struct throtl_service_queue *sq;

                if (!parent_sq->nr_pending)
                        break;

                tg = throtl_rb_first(parent_sq);
                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                nr_disp += throtl_dispatch_tg(tg);

                sq = &tg->service_queue;
                if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
                        tg_update_disptime(tg);
                else
                        throtl_dequeue_tg(tg);

                if (nr_disp >= THROTL_QUANTUM)
                        break;
        }

        return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
        struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct throtl_service_queue *parent_sq;
        struct request_queue *q;
        bool dispatched;
        int ret;

        /* throtl_data may be gone, so figure out request queue by blkg */
        if (tg)
                q = tg->pd.blkg->q;
        else
                q = td->queue;

        spin_lock_irq(&q->queue_lock);

        if (!q->root_blkg)
                goto out_unlock;

again:
        parent_sq = sq->parent_sq;
        dispatched = false;

        while (true) {
                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
                           sq->nr_queued[READ], sq->nr_queued[WRITE]);

                ret = throtl_select_dispatch(sq);
                if (ret) {
                        throtl_log(sq, "bios disp=%u", ret);
                        dispatched = true;
                }

                if (throtl_schedule_next_dispatch(sq, false))
                        break;

                /* this dispatch window is still open, relax and repeat */
                spin_unlock_irq(&q->queue_lock);
                cpu_relax();
                spin_lock_irq(&q->queue_lock);
        }

        if (!dispatched)
                goto out_unlock;

        if (parent_sq) {
                /* @parent_sq is another throtl_grp, propagate dispatch */
                if (tg->flags & THROTL_TG_WAS_EMPTY) {
                        tg_update_disptime(tg);
                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
                                /* window is already open, repeat dispatching */
                                sq = parent_sq;
                                tg = sq_to_tg(sq);
                                goto again;
                        }
                }
        } else {
                /* reached the top-level, queue issuing */
                queue_work(kthrotld_workqueue, &td->dispatch_work);
        }
out_unlock:
        spin_unlock_irq(&q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              dispatch_work);
        struct throtl_service_queue *td_sq = &td->service_queue;
        struct request_queue *q = td->queue;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;
        int rw;

        bio_list_init(&bio_list_on_stack);

        spin_lock_irq(&q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(&q->queue_lock);

        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        submit_bio_noacct_nocheck(bio);
                blk_finish_plug(&plug);
        }
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == U64_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == UINT_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&tg->service_queue,
                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

        rcu_read_lock();
        /*
         * Update has_rules[] flags for the updated tg's subtree.  A tg is
         * considered to have rules if either the tg itself or any of its
         * ancestors has rules.  This identifies groups without any
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
        blkg_for_each_descendant_pre(blkg, pos_css,
                        global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
                struct throtl_grp *this_tg = blkg_to_tg(blkg);

                tg_update_has_rules(this_tg);
                /* ignore root/second level */
                if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
                    !blkg->parent->parent)
                        continue;
        }
        rcu_read_unlock();

        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITE.  It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO against the new low rate.
         */
        throtl_start_new_slice(tg, READ, false);
        throtl_start_new_slice(tg, WRITE, false);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(sq->parent_sq, true);
        }
}

static int blk_throtl_init(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;

        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
        throtl_service_queue_init(&td->service_queue);

        /*
         * Freeze queue before activating policy, to synchronize with IO path,
         * which is protected by 'q_usage_counter'.
         */
        blk_mq_freeze_queue(disk->queue);
        blk_mq_quiesce_queue(disk->queue);

        q->td = td;
        td->queue = q;

        /* activate policy */
        ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
        if (ret) {
                q->td = NULL;
                kfree(td);
                goto out;
        }

        if (blk_queue_nonrot(q))
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
        else
                td->throtl_slice = DFL_THROTL_SLICE_HD;
        td->track_bio_latency = !queue_is_mq(q);
        if (!td->track_bio_latency)
                blk_stat_enable_accounting(q);

out:
        blk_mq_unquiesce_queue(disk->queue);
        blk_mq_unfreeze_queue(disk->queue);

        return ret;
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
                           char *buf, size_t nbytes, loff_t off, bool is_u64)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        int ret;
        u64 v;

        blkg_conf_init(&ctx, buf);

        ret = blkg_conf_open_bdev(&ctx);
        if (ret)
                goto out_finish;

        if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
                ret = blk_throtl_init(ctx.bdev->bd_disk);
                if (ret)
                        goto out_finish;
        }

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
        if (ret)
                goto out_finish;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) != 1)
                goto out_finish;
        if (!v)
                v = U64_MAX;

        tg = blkg_to_tg(ctx.blkg);
        tg_update_carryover(tg);

        if (is_u64)
                *(u64 *)((void *)tg + of_cft(of)->private) = v;
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;

        tg_conf_updated(tg, false);
        ret = 0;
out_finish:
        blkg_conf_exit(&ctx);
        return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
                               char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat, &blkcg_policy_throtl,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample sum;

        blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
                                  &sum);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
                          seq_cft(sf)->private, true);
        return 0;
}

static struct cftype throtl_legacy_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = offsetof(struct throtl_grp, stat_bytes),
                .seq_show = tg_print_rwstat,
        },
        {
                .name = "throttle.io_service_bytes_recursive",
                .private = offsetof(struct throtl_grp, stat_bytes),
                .seq_show = tg_print_rwstat_recursive,
        },
        {
                .name = "throttle.io_serviced",
                .private = offsetof(struct throtl_grp, stat_ios),
                .seq_show = tg_print_rwstat,
        },
        {
                .name = "throttle.io_serviced_recursive",
                .private = offsetof(struct throtl_grp, stat_ios),
                .seq_show = tg_print_rwstat_recursive,
        },
        { }     /* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
                         int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        const char *dname = blkg_dev_name(pd->blkg);
        u64 bps_dft;
        unsigned int iops_dft;

        if (!dname)
                return 0;

        bps_dft = U64_MAX;
        iops_dft = UINT_MAX;

        if (tg->bps[READ] == bps_dft &&
            tg->bps[WRITE] == bps_dft &&
            tg->iops[READ] == iops_dft &&
            tg->iops[WRITE] == iops_dft)
                return 0;

        seq_printf(sf, "%s", dname);
        if (tg->bps[READ] == U64_MAX)
                seq_printf(sf, " rbps=max");
        else
                seq_printf(sf, " rbps=%llu", tg->bps[READ]);

        if (tg->bps[WRITE] == U64_MAX)
                seq_printf(sf, " wbps=max");
        else
                seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);

        if (tg->iops[READ] == UINT_MAX)
                seq_printf(sf, " riops=max");
        else
                seq_printf(sf, " riops=%u", tg->iops[READ]);

        if (tg->iops[WRITE] == UINT_MAX)
                seq_printf(sf, " wiops=max");
        else
                seq_printf(sf, " wiops=%u", tg->iops[WRITE]);

        seq_printf(sf, "\n");
        return 0;
}
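
/*
 * A resulting "io.max" line looks like, e.g. (values illustrative):
 *
 *   8:16 rbps=1048576 wbps=max riops=max wiops=120
 *
 * where "max" stands for an unconfigured (unlimited) knob.
 */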

static int tg_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
                          char *buf, size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        u64 v[4];
        int ret;

        blkg_conf_init(&ctx, buf);

        ret = blkg_conf_open_bdev(&ctx);
        if (ret)
                goto out_finish;

        if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
                ret = blk_throtl_init(ctx.bdev->bd_disk);
                if (ret)
                        goto out_finish;
        }

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
        if (ret)
                goto out_finish;

        tg = blkg_to_tg(ctx.blkg);
        tg_update_carryover(tg);

        v[0] = tg->bps[READ];
        v[1] = tg->bps[WRITE];
        v[2] = tg->iops[READ];
        v[3] = tg->iops[WRITE];

        while (true) {
                char tok[27];   /* wiops=18446744073709551616 */
                char *p;
                u64 val = U64_MAX;
                int len;

                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
                        break;
                if (tok[0] == '\0')
                        break;
                ctx.body += len;

                ret = -EINVAL;
                p = tok;
                strsep(&p, "=");
                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
                        goto out_finish;

                ret = -ERANGE;
                if (!val)
                        goto out_finish;

                ret = -EINVAL;
                if (!strcmp(tok, "rbps") && val > 1)
                        v[0] = val;
                else if (!strcmp(tok, "wbps") && val > 1)
                        v[1] = val;
                else if (!strcmp(tok, "riops") && val > 1)
                        v[2] = min_t(u64, val, UINT_MAX);
                else if (!strcmp(tok, "wiops") && val > 1)
                        v[3] = min_t(u64, val, UINT_MAX);
                else
                        goto out_finish;
        }

        tg->bps[READ] = v[0];
        tg->bps[WRITE] = v[1];
        tg->iops[READ] = v[2];
        tg->iops[WRITE] = v[3];

        tg_conf_updated(tg, false);
        ret = 0;
out_finish:
        blkg_conf_exit(&ctx);
        return ret ?: nbytes;
}
1511
1512 static struct cftype throtl_files[] = {
1513         {
1514                 .name = "max",
1515                 .flags = CFTYPE_NOT_ON_ROOT,
1516                 .seq_show = tg_print_limit,
1517                 .write = tg_set_limit,
1518         },
1519         { }     /* terminate */
1520 };
1521
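/*
 * Cancel any pending dispatch work so it cannot run after the
 * throttle data has been freed on queue exit.
 */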
1522 static void throtl_shutdown_wq(struct request_queue *q)
1523 {
1524         struct throtl_data *td = q->td;
1525
1526         cancel_work_sync(&td->dispatch_work);
1527 }
1528
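/*
 * Policy glue: throtl_files appears as "io.max" on the cgroup v2
 * hierarchy (the cgroup core prefixes cftype names with the controller
 * name), while throtl_legacy_files provides the cgroup v1 interface.
 */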
1529 struct blkcg_policy blkcg_policy_throtl = {
1530         .dfl_cftypes            = throtl_files,
1531         .legacy_cftypes         = throtl_legacy_files,
1532
1533         .pd_alloc_fn            = throtl_pd_alloc,
1534         .pd_init_fn             = throtl_pd_init,
1535         .pd_online_fn           = throtl_pd_online,
1536         .pd_free_fn             = throtl_pd_free,
1537 };
1538
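/*
 * Mark every throtl_grp of @disk as THROTL_TG_CANCELING and kick its
 * pending timer, so that all throttled bios are dispatched without
 * further delay, typically because the disk is going away.
 */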
1539 void blk_throtl_cancel_bios(struct gendisk *disk)
1540 {
1541         struct request_queue *q = disk->queue;
1542         struct cgroup_subsys_state *pos_css;
1543         struct blkcg_gq *blkg;
1544
1545         if (!blk_throtl_activated(q))
1546                 return;
1547
1548         spin_lock_irq(&q->queue_lock);
1549         /*
1550          * With queue_lock held, the RCU lock is technically not needed
1551          * here. It is taken anyway to emphasize that the following
1552          * path needs RCU protection and to silence a lockdep warning.
1553          */
1554         rcu_read_lock();
1555         blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1556                 struct throtl_grp *tg = blkg_to_tg(blkg);
1557                 struct throtl_service_queue *sq = &tg->service_queue;
1558
1559                 /*
1560                  * Set the flag to make sure throtl_pending_timer_fn() won't
1561                  * stop until all throttled bios are dispatched.
1562                  */
1563                 tg->flags |= THROTL_TG_CANCELING;
1564
1565                 /*
1566                  * Do not dispatch a cgroup without THROTL_TG_PENDING set,
1567                  * or tg_update_disptime() below would insert it into the
1568                  * service queue without the flag. IO dispatched from a
1569                  * child in tg_dispatch_one_bio() would then trigger a
1570                  * double insertion and corrupt the tree.
1571                  */
1572                 if (!(tg->flags & THROTL_TG_PENDING))
1573                         continue;
1574
1575                 /*
1576                  * Update disptime after setting the above flag to make sure
1577                  * throtl_select_dispatch() won't exit without dispatching.
1578                  */
1579                 tg_update_disptime(tg);
1580
1581                 throtl_schedule_pending_timer(sq, jiffies + 1);
1582         }
1583         rcu_read_unlock();
1584         spin_unlock_irq(&q->queue_lock);
1585 }
1586
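/*
 * Decide whether @bio may bypass the queue: true only if no bios are
 * already waiting in the same direction and the group's current slice
 * has budget for this one.
 */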
1587 static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
1588 {
1589         /* throtl is FIFO - if bios are already queued, this one must queue too */
1590         if (tg->service_queue.nr_queued[rw])
1591                 return false;
1592
1593         return tg_may_dispatch(tg, bio, NULL);
1594 }
1595
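/*
 * Account an over-limit dispatch as debt: subtract the bio's cost from
 * the carryover budget so the group pays it back out of future slices.
 * Bytes are only charged if the bio has not already been bps-throttled
 * at a lower level.
 */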
1596 static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
1597 {
1598         if (!bio_flagged(bio, BIO_BPS_THROTTLED))
1599                 tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
1600         tg->carryover_ios[rw]--;
1601 }
1602
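/*
 * Throttle @bio against the limits of its blkcg.  Walk from the bio's
 * group towards the root, charging every level that has budget, and
 * queue the bio at the first level that is over its limit.  Returns
 * %true if the bio was queued for later dispatch, %false if the caller
 * may issue it right away.
 */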
1603 bool __blk_throtl_bio(struct bio *bio)
1604 {
1605         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1606         struct blkcg_gq *blkg = bio->bi_blkg;
1607         struct throtl_qnode *qn = NULL;
1608         struct throtl_grp *tg = blkg_to_tg(blkg);
1609         struct throtl_service_queue *sq;
1610         bool rw = bio_data_dir(bio);
1611         bool throttled = false;
1612         struct throtl_data *td = tg->td;
1613
1614         rcu_read_lock();
1615         spin_lock_irq(&q->queue_lock);
1616         sq = &tg->service_queue;
1617
1618         while (true) {
1619                 if (tg_within_limit(tg, bio, rw)) {
1620                         /* within limits, let's charge and dispatch directly */
1621                         throtl_charge_bio(tg, bio);
1622
1623                         /*
1624                          * We need to trim the slice even when bios are not
1625                          * being queued; otherwise a bio may go unqueued for
1626                          * a long time while the slice keeps extending and
1627                          * trim is never called. If the limits are then
1628                          * reduced suddenly, all the IO dispatched so far is
1629                          * accounted at the new low rate and newly queued IO
1630                          * gets a really long dispatch time.
1631                          *
1632                          * So keep trimming the slice even if no bio is queued.
1633                          */
1634                         throtl_trim_slice(tg, rw);
1635                 } else if (bio_issue_as_root_blkg(bio)) {
1636                         /*
1637                          * IOs which may cause priority inversions are
1638                          * dispatched directly, even if they're over limit.
1639                          * Debts are handled by carryover_bytes/ios while
1640                          * calculating wait time.
1641                          */
1642                         tg_dispatch_in_debt(tg, bio, rw);
1643                 } else {
1644                         /* if above limits, break to queue */
1645                         break;
1646                 }
1647
1648                 /*
1649                  * @bio passed through this layer without being throttled.
1650                  * Climb up the ladder.  If we're already at the top, it
1651                  * can be executed directly.
1652                  */
1653                 qn = &tg->qnode_on_parent[rw];
1654                 sq = sq->parent_sq;
1655                 tg = sq_to_tg(sq);
1656                 if (!tg) {
1657                         bio_set_flag(bio, BIO_BPS_THROTTLED);
1658                         goto out_unlock;
1659                 }
1660         }
1661
1662         /* out-of-limit, queue to @tg */
1663         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1664                    rw == READ ? 'R' : 'W',
1665                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
1666                    tg_bps_limit(tg, rw),
1667                    tg->io_disp[rw], tg_iops_limit(tg, rw),
1668                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
1669
1670         td->nr_queued[rw]++;
1671         throtl_add_bio_tg(bio, qn, tg);
1672         throttled = true;
1673
1674         /*
1675          * Update @tg's dispatch time and force schedule dispatch if @tg
1676          * was empty before @bio.  The forced scheduling isn't likely to
1677          * cause undue delay as @bio is likely to be dispatched directly if
1678          * its @tg's disptime is not in the future.
1679          */
1680         if (tg->flags & THROTL_TG_WAS_EMPTY) {
1681                 tg_update_disptime(tg);
1682                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1683         }
1684
1685 out_unlock:
1686         spin_unlock_irq(&q->queue_lock);
1687
1688         rcu_read_unlock();
1689         return throttled;
1690 }
1691
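/*
 * Tear down throttling for @disk: stop the pending timer and the
 * dispatch work, deactivate the policy and free the throttle data.
 * A no-op if throttling was never activated on the queue.
 */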
1692 void blk_throtl_exit(struct gendisk *disk)
1693 {
1694         struct request_queue *q = disk->queue;
1695
1696         if (!blk_throtl_activated(q))
1697                 return;
1698
1699         del_timer_sync(&q->td->service_queue.pending_timer);
1700         throtl_shutdown_wq(q);
1701         blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
1702         kfree(q->td);
1703 }
1704
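/*
 * Boot-time setup.  The kthrotld workqueue runs the dispatch work;
 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure,
 * since throttled writeback may be needed for reclaim to make headway.
 */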
1705 static int __init throtl_init(void)
1706 {
1707         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1708         if (!kthrotld_workqueue)
1709                 panic("Failed to create kthrotld\n");
1710
1711         return blkcg_policy_register(&blkcg_policy_throtl);
1712 }
1713
1714 module_init(throtl_init);