blkcg: factor out blkio_group creation
[linux-2.6-block.git] / block / blk-throttle.c

/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;

	struct rcu_head rcu_head;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
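/*
 * Note: the THROTL_TG_FNS(on_rr) instantiation above generates
 * throtl_mark_tg_on_rr(), throtl_clear_tg_on_rr() and throtl_tg_on_rr(),
 * which set, clear and test the THROTL_TG_FLAG_on_rr bit in tg->flags.
 */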

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in an RCU manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
						    struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, q->node);
	if (!tg)
		return NULL;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	return &tg->blkg;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time as the queue was being instantiated
	 * and the driver had not attached a device yet.
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without the queue lock held. The queue lock will be
 * taken rarely here: at most once during the lifetime of a group, if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_link_blkio_group(struct request_queue *q,
				    struct blkio_group *blkg)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	__throtl_tg_fill_dev_details(td, tg);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static struct
throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkg_lookup(blkcg, td->queue,
					    BLKIO_POLICY_THROTL));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_THROTL, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = tg_of_blkg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}
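/*
 * Note on the fallback above: if blkg_lookup_create() fails (for example an
 * allocation failure) while the queue is still alive, bios are accounted to
 * the root group instead of being left without a group; only a dead queue
 * results in a NULL return here.
 */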

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
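/*
 * The service tree keeps groups sorted by disptime and caches the leftmost
 * node in st->left, so throtl_rb_first() normally finds the next group to
 * dispatch without walking the tree.
 */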

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if the slice is used up. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
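/*
 * Worked example for throtl_trim_slice() above (illustrative numbers, not
 * taken from the code): with HZ=1000, throtl_slice=100 jiffies and
 * bps[rw]=1000000, three fully elapsed slices give
 * bytes_trim = 1000000 * 100 * 3 / 1000 = 300000, so up to 300000 bytes are
 * subtracted from bytes_disp[rw] and slice_start[rw] advances by 300 jiffies.
 */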

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops can
	 * be 1, so at most jiffy_elapsed should be equivalent to 1 second,
	 * as we will allow dispatch after 1 second and after that the slice
	 * should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
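/*
 * Illustrative numbers for the iops check above (not from the code): with
 * HZ=1000, throtl_slice=100 and iops[rw]=50, a just-started slice rounds
 * jiffy_elapsed_rnd up to 100, so io_allowed = 50 * 100 / 1000 = 5 bios
 * before waiting kicks in.
 */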

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
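/*
 * Illustrative wait calculation for the bps check above (numbers are not
 * from the code): with HZ=1000, bps[rw]=1048576 and extra_bytes=524288, the
 * extra wait is 524288 * 1000 / 1048576 = 500 jiffies, i.e. half a second,
 * before this bio fits within the byte budget.
 */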

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
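/*
 * With the default throtl_grp_quantum of 8, the 75/25 split above works out
 * to max_nr_reads = 8 * 3 / 4 = 6 and max_nr_writes = 8 - 6 = 2 bios per
 * group per dispatch round.
 */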

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{

	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;
	bool empty = true;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/* skip root? */
		if (!release_root && tg == td->root_tg)
			continue;

		/*
		 * If the cgroup removal path got to the blk_group first and
		 * removed it from the cgroup list, then it will take care of
		 * destroying the throtl_grp too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
		else
			empty = false;
	}
	return empty;
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as the associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). @q is the rcu protected
 * pointer. That means @q is a valid request_queue pointer as long as we
 * are under the rcu read lock.
 *
 * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(struct request_queue *q,
			       struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	throtl_destroy_tg(q->td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static bool throtl_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * Clear tgs but leave the root one alone. This is necessary
	 * because root_tg is expected to be persistent and safe because
	 * blk-throtl can never be disabled while @q is alive. This is a
	 * kludge to prepare for unified blkg. This whole function will be
	 * removed soon.
	 */
	return throtl_release_tgs(q->td, false);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
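/*
 * The two xchg()s above pair with the xchg()-based tests in
 * throtl_process_limit_change(): the per-group flag marks which groups
 * changed and the per-td flag lets the dispatch worker notice the update
 * without the updaters having to take the queue lock.
 */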

/*
 * For all update functions, @q should be a valid pointer because these
 * update functions are called under blkcg_lock; that means blkg is
 * valid and in turn @q is valid. The queue exit path cannot race because
 * of blkcg_lock.
 *
 * We cannot take the queue lock in the update functions as taking the queue
 * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
 * under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_write_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_read_iops(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_write_iops(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_alloc_group_fn = throtl_alloc_blkio_group,
		.blkio_link_group_fn = throtl_link_blkio_group,
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_clear_queue_fn = throtl_clear_queue,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
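/*
 * Note on the ops above: in line with the "factor out blkio_group creation"
 * change this file reflects, group allocation and linking are assumed to be
 * driven by the blk-cgroup core, which calls ->blkio_alloc_group_fn() and
 * ->blkio_link_group_fn() from blkg_lookup_create() instead of each policy
 * creating groups on its own.
 */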

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;

	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}
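/*
 * Usage note (a sketch of the assumed calling convention, not taken from
 * this file): blk_throtl_bio() is expected to be called from the request
 * queue's make_request path before a bio is issued; a true return means the
 * bio was queued here and the caller must not submit it further, while
 * false means the bio is within limits (or already marked REQ_THROTTLED)
 * and should proceed.
 */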

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_THROTL,
				  true);
	if (!IS_ERR(blkg))
		td->root_tg = tg_of_blkg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td, true);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->q accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if somebody updated limits through cgroup after
	 * the previous flush and another work got queued, cancel it.
	 */
	throtl_shutdown_wq(q);
}

void blk_throtl_release(struct request_queue *q)
{
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);