[linux-block.git] / block / blk-cgroup.c
3dcf60bc 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Common Block IO controller cgroup interface
4 *
5 * Based on ideas and code from CFQ, CFS and BFQ:
6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
7 *
8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
9 * Paolo Valente <paolo.valente@unimore.it>
10 *
11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
12 * Nauman Rafique <nauman@google.com>
13 *
14 * For policy-specific per-blkcg data:
15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
16 * Arianna Avanzini <avanzini.arianna@gmail.com>
17 */
18#include <linux/ioprio.h>
22084190 19#include <linux/kdev_t.h>
9d6a986c 20#include <linux/module.h>
174cd4b1 21#include <linux/sched/signal.h>
accee785 22#include <linux/err.h>
9195291e 23#include <linux/blkdev.h>
52ebea74 24#include <linux/backing-dev.h>
5a0e3ad6 25#include <linux/slab.h>
72e06c25 26#include <linux/delay.h>
9a9e8a26 27#include <linux/atomic.h>
36aa9e5f 28#include <linux/ctype.h>
03248add 29#include <linux/resume_user_mode.h>
fd112c74 30#include <linux/psi.h>
82d981d4 31#include <linux/part_stat.h>
5efd6113 32#include "blk.h"
672fdcf0 33#include "blk-cgroup.h"
556910e3 34#include "blk-ioprio.h"
a7b36ee6 35#include "blk-throttle.h"
813e6930 36#include "blk-rq-qos.h"
3e252066 37
38/*
39 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
40 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
41 * policy [un]register operations including cgroup file additions /
42 * removals. Putting cgroup file registration outside blkcg_pol_mutex
43 * allows grabbing it from cgroup callbacks.
44 */
45static DEFINE_MUTEX(blkcg_pol_register_mutex);
bc0d6501 46static DEFINE_MUTEX(blkcg_pol_mutex);
923adde1 47
e48453c3 48struct blkcg blkcg_root;
3c798398 49EXPORT_SYMBOL_GPL(blkcg_root);
9d6a986c 50
496d5e75 51struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
9b0eb69b 52EXPORT_SYMBOL_GPL(blkcg_root_css);
496d5e75 53
3c798398 54static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
035d10b2 55
56static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
57
07b0fdec 58bool blkcg_debug_stats = false;
d3f77dfd 59static struct workqueue_struct *blkcg_punt_bio_wq;
903d23f0 60
61#define BLKG_DESTROY_BATCH_SIZE 64
62
63/*
64 * Lockless lists for tracking IO stats update
65 *
66 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
67 * There are multiple blkg's (one for each block device) attached to each
68 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
69 * but it doesn't know which blkg has the updated stats. If there are many
70 * block devices in a system, the cost of iterating all the blkg's to flush
71 * out the IO stats can be high. To reduce such overhead, a set of percpu
72 * lockless lists (lhead) per blkcg are used to track the set of recently
73 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
74 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
75 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
76 * A reference to the blkg is taken when its iostat_cpu is queued and put
77 * back when it is flushed, protecting against blkg removal in between.
78 *
79 * Return: 0 if successful or -ENOMEM if allocation fails.
80 */
81static int init_blkcg_llists(struct blkcg *blkcg)
82{
83 int cpu;
84
85 blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
86 if (!blkcg->lhead)
87 return -ENOMEM;
88
89 for_each_possible_cpu(cpu)
90 init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
91 return 0;
92}
93
94/**
95 * blkcg_css - find the current css
96 *
97 * Find the css associated with either the kthread or the current task.
98 * This may return a dying css, so it is up to the caller to use tryget logic
99 * to confirm it is alive and well.
100 */
101static struct cgroup_subsys_state *blkcg_css(void)
102{
103 struct cgroup_subsys_state *css;
104
105 css = kthread_blkcg();
106 if (css)
107 return css;
108 return task_css(current, io_cgrp_id);
109}
110
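/* Return true if policy @pol has been activated on @q (see blkcg_activate_policy()). */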
a2b1693b 111static bool blkcg_policy_enabled(struct request_queue *q,
3c798398 112 const struct blkcg_policy *pol)
113{
114 return pol && test_bit(pol->plid, q->blkcg_pols);
115}
116
d578c770 117static void blkg_free_workfn(struct work_struct *work)
0381411e 118{
119 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
120 free_work);
e8989fae 121 int i;
549d3aa8 122
db613670 123 for (i = 0; i < BLKCG_MAX_POLS; i++)
124 if (blkg->pd[i])
125 blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
e8989fae 126
127 if (blkg->q)
128 blk_put_queue(blkg->q);
f7331648 129 free_percpu(blkg->iostat_cpu);
ef069b97 130 percpu_ref_exit(&blkg->refcnt);
549d3aa8 131 kfree(blkg);
132}
133
134/**
135 * blkg_free - free a blkg
136 * @blkg: blkg to free
137 *
138 * Free @blkg which may be partially allocated.
139 */
140static void blkg_free(struct blkcg_gq *blkg)
141{
142 if (!blkg)
143 return;
144
145 /*
146 * Both ->pd_free_fn() and the request queue's release handler may
147 * sleep, so free the blkg from a scheduled work item.
148 */
149 INIT_WORK(&blkg->free_work, blkg_free_workfn);
150 schedule_work(&blkg->free_work);
151}
152
153static void __blkg_release(struct rcu_head *rcu)
154{
155 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
156
157 WARN_ON(!bio_list_empty(&blkg->async_bios));
158
159 /* release the blkcg and parent blkg refs this blkg has been holding */
160 css_put(&blkg->blkcg->css);
161 if (blkg->parent)
162 blkg_put(blkg->parent);
163 blkg_free(blkg);
164}
165
166/*
167 * A group is RCU protected, but having an rcu lock does not mean that one
168 * can access all the fields of blkg and assume these are valid. For
169 * example, don't try to follow throtl_data and request queue links.
170 *
171 * Holding a blkg reference under RCU only allows access to values local
172 * to the group, such as group stats and group rate limits.
173 */
174static void blkg_release(struct percpu_ref *ref)
175{
176 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
177
178 call_rcu(&blkg->rcu_head, __blkg_release);
179}
180
181static void blkg_async_bio_workfn(struct work_struct *work)
182{
183 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
184 async_bio_work);
185 struct bio_list bios = BIO_EMPTY_LIST;
186 struct bio *bio;
187 struct blk_plug plug;
188 bool need_plug = false;
189
190 /* as long as there are pending bios, @blkg can't go away */
191 spin_lock_bh(&blkg->async_bio_lock);
192 bio_list_merge(&bios, &blkg->async_bios);
193 bio_list_init(&blkg->async_bios);
194 spin_unlock_bh(&blkg->async_bio_lock);
195
196 /* start plug only when bio_list contains at least 2 bios */
197 if (bios.head && bios.head->bi_next) {
198 need_plug = true;
199 blk_start_plug(&plug);
200 }
201 while ((bio = bio_list_pop(&bios)))
202 submit_bio(bio);
203 if (need_plug)
204 blk_finish_plug(&plug);
205}
206
207/**
208 * bio_blkcg_css - return the blkcg CSS associated with a bio
209 * @bio: target bio
210 *
211 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
212 * associated. Callers are expected to either handle %NULL or know association
213 * has been done prior to calling this.
214 */
215struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
216{
217 if (!bio || !bio->bi_blkg)
218 return NULL;
219 return &bio->bi_blkg->blkcg->css;
220}
221EXPORT_SYMBOL_GPL(bio_blkcg_css);
222
223/**
224 * blkcg_parent - get the parent of a blkcg
225 * @blkcg: blkcg of interest
226 *
227 * Return the parent blkcg of @blkcg. Can be called anytime.
228 */
229static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
230{
231 return css_to_blkcg(blkcg->css.parent);
232}
233
234/**
235 * blkg_alloc - allocate a blkg
236 * @blkcg: block cgroup the new blkg is associated with
99e60387 237 * @disk: gendisk the new blkg is associated with
15974993 238 * @gfp_mask: allocation mask to use
0381411e 239 *
e8989fae 240 * Allocate a new blkg associating @blkcg and @disk.
0381411e 241 */
99e60387 242static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
15974993 243 gfp_t gfp_mask)
0381411e 244{
3c798398 245 struct blkcg_gq *blkg;
f7331648 246 int i, cpu;
247
248 /* alloc and init base part */
99e60387 249 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
250 if (!blkg)
251 return NULL;
252
253 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
254 goto err_free;
255
256 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
257 if (!blkg->iostat_cpu)
258 goto err_free;
259
99e60387 260 if (!blk_get_queue(disk->queue))
261 goto err_free;
262
99e60387 263 blkg->q = disk->queue;
e8989fae 264 INIT_LIST_HEAD(&blkg->q_node);
265 spin_lock_init(&blkg->async_bio_lock);
266 bio_list_init(&blkg->async_bios);
267 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
0381411e 268 blkg->blkcg = blkcg;
0381411e 269
f7331648 270 u64_stats_init(&blkg->iostat.sync);
3b8cc629 271 for_each_possible_cpu(cpu) {
f7331648 272 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
273 per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
274 }
f7331648 275
8bd435b3 276 for (i = 0; i < BLKCG_MAX_POLS; i++) {
3c798398 277 struct blkcg_policy *pol = blkcg_policy[i];
e8989fae 278 struct blkg_policy_data *pd;
0381411e 279
99e60387 280 if (!blkcg_policy_enabled(disk->queue, pol))
281 continue;
282
283 /* alloc per-policy data and attach it to blkg */
99e60387 284 pd = pol->pd_alloc_fn(gfp_mask, disk->queue, blkcg);
285 if (!pd)
286 goto err_free;
549d3aa8 287
288 blkg->pd[i] = pd;
289 pd->blkg = blkg;
b276a876 290 pd->plid = i;
291 }
292
0381411e 293 return blkg;
294
295err_free:
296 blkg_free(blkg);
297 return NULL;
298}
299
15974993 300/*
301 * If @new_blkg is %NULL, this function tries to allocate a new one as
302 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
15974993 303 */
99e60387 304static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
d708f0d5 305 struct blkcg_gq *new_blkg)
5624a4e4 306{
d708f0d5 307 struct blkcg_gq *blkg;
f427d909 308 int i, ret;
5624a4e4 309
99e60387 310 lockdep_assert_held(&disk->queue->queue_lock);
cd1604fa 311
0273ac34 312 /* request_queue is dying, do not create/recreate a blkg */
99e60387 313 if (blk_queue_dying(disk->queue)) {
314 ret = -ENODEV;
315 goto err_free_blkg;
316 }
317
7ee9c562 318 /* blkg holds a reference to blkcg */
ec903c0c 319 if (!css_tryget_online(&blkcg->css)) {
20386ce0 320 ret = -ENODEV;
93e6d5d8 321 goto err_free_blkg;
15974993 322 }
cd1604fa 323
324 /* allocate */
325 if (!new_blkg) {
99e60387 326 new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
327 if (unlikely(!new_blkg)) {
328 ret = -ENOMEM;
8c911f3d 329 goto err_put_css;
330 }
331 }
d708f0d5 332 blkg = new_blkg;
cd1604fa 333
db613670 334 /* link parent */
3c547865 335 if (blkcg_parent(blkcg)) {
99e60387 336 blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
3c547865 337 if (WARN_ON_ONCE(!blkg->parent)) {
20386ce0 338 ret = -ENODEV;
8c911f3d 339 goto err_put_css;
340 }
341 blkg_get(blkg->parent);
342 }
343
344 /* invoke per-policy init */
345 for (i = 0; i < BLKCG_MAX_POLS; i++) {
346 struct blkcg_policy *pol = blkcg_policy[i];
347
348 if (blkg->pd[i] && pol->pd_init_fn)
a9520cd6 349 pol->pd_init_fn(blkg->pd[i]);
350 }
351
352 /* insert */
cd1604fa 353 spin_lock(&blkcg->lock);
99e60387 354 ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
355 if (likely(!ret)) {
356 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
99e60387 357 list_add(&blkg->q_node, &disk->queue->blkg_list);
358
359 for (i = 0; i < BLKCG_MAX_POLS; i++) {
360 struct blkcg_policy *pol = blkcg_policy[i];
361
362 if (blkg->pd[i] && pol->pd_online_fn)
a9520cd6 363 pol->pd_online_fn(blkg->pd[i]);
f427d909 364 }
a637120e 365 }
f427d909 366 blkg->online = true;
cd1604fa 367 spin_unlock(&blkcg->lock);
496fb780 368
ec13b1d6 369 if (!ret)
a637120e 370 return blkg;
15974993 371
372 /* @blkg failed to be fully initialized, use the usual release path */
373 blkg_put(blkg);
374 return ERR_PTR(ret);
375
d708f0d5 376err_put_css:
496fb780 377 css_put(&blkcg->css);
93e6d5d8 378err_free_blkg:
d708f0d5 379 blkg_free(new_blkg);
93e6d5d8 380 return ERR_PTR(ret);
31e4c28d 381}
3c96cb32 382
86cde6b6 383/**
8c546287 384 * blkg_lookup_create - lookup blkg, try to create one if not there
86cde6b6 385 * @blkcg: blkcg of interest
99e60387 386 * @disk: gendisk of interest
86cde6b6 387 *
99e60387 388 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
389 * create one. blkg creation is performed recursively from blkcg_root such
390 * that all non-root blkg's have access to the parent blkg. This function
99e60387 391 * should be called under RCU read lock and takes @disk->queue->queue_lock.
86cde6b6 392 *
beea9da0
DZ
393 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
394 * down from root.
86cde6b6 395 */
8c546287 396static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
99e60387 397 struct gendisk *disk)
3c96cb32 398{
99e60387 399 struct request_queue *q = disk->queue;
86cde6b6 400 struct blkcg_gq *blkg;
8c546287 401 unsigned long flags;
402
403 WARN_ON_ONCE(!rcu_read_lock_held());
86cde6b6 404
8c546287 405 blkg = blkg_lookup(blkcg, q);
406 if (blkg)
407 return blkg;
408
8c546287 409 spin_lock_irqsave(&q->queue_lock, flags);
410 blkg = blkg_lookup(blkcg, q);
411 if (blkg) {
412 if (blkcg != &blkcg_root &&
413 blkg != rcu_dereference(blkcg->blkg_hint))
414 rcu_assign_pointer(blkcg->blkg_hint, blkg);
8c546287 415 goto found;
4a69f325 416 }
8c546287 417
418 /*
419 * Create blkgs walking down from blkcg_root to @blkcg, so that all
420 * non-root blkgs have access to their parents. Returns the closest
421 * blkg to the intended blkg should blkg_create() fail.
422 */
423 while (true) {
424 struct blkcg *pos = blkcg;
425 struct blkcg *parent = blkcg_parent(blkcg);
426 struct blkcg_gq *ret_blkg = q->root_blkg;
427
428 while (parent) {
79fcc5be 429 blkg = blkg_lookup(parent, q);
430 if (blkg) {
431 /* remember closest blkg */
432 ret_blkg = blkg;
433 break;
434 }
435 pos = parent;
436 parent = blkcg_parent(parent);
437 }
438
99e60387 439 blkg = blkg_create(pos, disk, NULL);
440 if (IS_ERR(blkg)) {
441 blkg = ret_blkg;
442 break;
443 }
beea9da0 444 if (pos == blkcg)
8c546287 445 break;
446 }
447
448found:
449 spin_unlock_irqrestore(&q->queue_lock, flags);
450 return blkg;
451}
452
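/*
 * Unlink @blkg from its blkcg and request_queue and drop the reference
 * taken at creation time. Called with both queue_lock and blkcg->lock held.
 */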
3c798398 453static void blkg_destroy(struct blkcg_gq *blkg)
03aa264a 454{
3c798398 455 struct blkcg *blkcg = blkg->blkcg;
6b065462 456 int i;
03aa264a 457
0d945c1f 458 lockdep_assert_held(&blkg->q->queue_lock);
9f13ef67 459 lockdep_assert_held(&blkcg->lock);
460
461 /* Something is wrong if we are trying to remove the same group twice */
e8989fae 462 WARN_ON_ONCE(list_empty(&blkg->q_node));
9f13ef67 463 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
a637120e 464
465 for (i = 0; i < BLKCG_MAX_POLS; i++) {
466 struct blkcg_policy *pol = blkcg_policy[i];
467
468 if (blkg->pd[i] && pol->pd_offline_fn)
469 pol->pd_offline_fn(blkg->pd[i]);
470 }
471
472 blkg->online = false;
473
a637120e 474 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
e8989fae 475 list_del_init(&blkg->q_node);
9f13ef67 476 hlist_del_init_rcu(&blkg->blkcg_node);
03aa264a 477
478 /*
479 * Both setting lookup hint to and clearing it from @blkg are done
480 * under queue_lock. If it's not pointing to @blkg now, it never
481 * will. Hint assignment itself can race safely.
482 */
ec6c676a 483 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
484 rcu_assign_pointer(blkcg->blkg_hint, NULL);
485
486 /*
487 * Put the reference taken at the time of creation so that when all
488 * queues are gone, group can be destroyed.
489 */
7fcf2b03 490 percpu_ref_kill(&blkg->refcnt);
491}
492
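/*
 * Destroy all blkgs attached to @disk's request_queue, dropping the
 * queue_lock every BLKG_DESTROY_BATCH_SIZE groups to avoid soft lockups.
 */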
00ad6991 493static void blkg_destroy_all(struct gendisk *disk)
72e06c25 494{
00ad6991 495 struct request_queue *q = disk->queue;
3c798398 496 struct blkcg_gq *blkg, *n;
a731763f 497 int count = BLKG_DESTROY_BATCH_SIZE;
72e06c25 498
a731763f 499restart:
0d945c1f 500 spin_lock_irq(&q->queue_lock);
9f13ef67 501 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
3c798398 502 struct blkcg *blkcg = blkg->blkcg;
72e06c25 503
504 spin_lock(&blkcg->lock);
505 blkg_destroy(blkg);
506 spin_unlock(&blkcg->lock);
507
508 /*
509 * in order to avoid holding the spin lock for too long, release
510 * it when a batch of blkgs are destroyed.
511 */
512 if (!(--count)) {
513 count = BLKG_DESTROY_BATCH_SIZE;
514 spin_unlock_irq(&q->queue_lock);
515 cond_resched();
516 goto restart;
517 }
72e06c25 518 }
519
520 q->root_blkg = NULL;
0d945c1f 521 spin_unlock_irq(&q->queue_lock);
522}
523
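/*
 * "reset_stats" write handler: zero the per-cpu and aggregated iostats of
 * every blkg in @blkcg and let each policy reset its own statistics.
 */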
524static int blkcg_reset_stats(struct cgroup_subsys_state *css,
525 struct cftype *cftype, u64 val)
303a3acb 526{
182446d0 527 struct blkcg *blkcg = css_to_blkcg(css);
3c798398 528 struct blkcg_gq *blkg;
f7331648 529 int i, cpu;
303a3acb 530
838f13bf 531 mutex_lock(&blkcg_pol_mutex);
303a3acb 532 spin_lock_irq(&blkcg->lock);
533
534 /*
535 * Note that stat reset is racy - it doesn't synchronize against
536 * stat updates. This is a debug feature which shouldn't exist
537 * anyway. If you get hit by a race, retry.
538 */
b67bfe0d 539 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
540 for_each_possible_cpu(cpu) {
541 struct blkg_iostat_set *bis =
542 per_cpu_ptr(blkg->iostat_cpu, cpu);
543 memset(bis, 0, sizeof(*bis));
544 }
545 memset(&blkg->iostat, 0, sizeof(blkg->iostat));
77ea7338 546
8bd435b3 547 for (i = 0; i < BLKCG_MAX_POLS; i++) {
3c798398 548 struct blkcg_policy *pol = blkcg_policy[i];
549d3aa8 549
550 if (blkg->pd[i] && pol->pd_reset_stats_fn)
551 pol->pd_reset_stats_fn(blkg->pd[i]);
bc0d6501 552 }
303a3acb 553 }
f0bdc8cd 554
303a3acb 555 spin_unlock_irq(&blkcg->lock);
bc0d6501 556 mutex_unlock(&blkcg_pol_mutex);
557 return 0;
558}
559
dd165eb3 560const char *blkg_dev_name(struct blkcg_gq *blkg)
303a3acb 561{
d152c682 562 if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
edb0872f 563 return NULL;
d152c682 564 return bdi_dev_name(blkg->q->disk->bdi);
565}
566
567/**
568 * blkcg_print_blkgs - helper for printing per-blkg data
569 * @sf: seq_file to print to
570 * @blkcg: blkcg of interest
571 * @prfill: fill function to print out a blkg
572 * @pol: policy in question
573 * @data: data to be passed to @prfill
574 * @show_total: to print out sum of prfill return values or not
575 *
576 * This function invokes @prfill on each blkg of @blkcg if pd for the
577 * policy specified by @pol exists. @prfill is invoked with @sf, the
578 * policy data and @data and the matching queue lock held. If @show_total
579 * is %true, the sum of the return values from @prfill is printed with
580 * "Total" label at the end.
581 *
582 * This is to be used to construct print functions for
583 * cftype->read_seq_string method.
584 */
3c798398 585void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
586 u64 (*prfill)(struct seq_file *,
587 struct blkg_policy_data *, int),
3c798398 588 const struct blkcg_policy *pol, int data,
ec399347 589 bool show_total)
5624a4e4 590{
3c798398 591 struct blkcg_gq *blkg;
d3d32e69 592 u64 total = 0;
5624a4e4 593
810ecfa7 594 rcu_read_lock();
ee89f812 595 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
0d945c1f 596 spin_lock_irq(&blkg->q->queue_lock);
a2b1693b 597 if (blkcg_policy_enabled(blkg->q, pol))
f95a04af 598 total += prfill(sf, blkg->pd[pol->plid], data);
0d945c1f 599 spin_unlock_irq(&blkg->q->queue_lock);
600 }
601 rcu_read_unlock();
602
603 if (show_total)
604 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
605}
829fdb50 606EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
607
608/**
609 * __blkg_prfill_u64 - prfill helper for a single u64 value
610 * @sf: seq_file to print to
f95a04af 611 * @pd: policy private data of interest
612 * @v: value to print
613 *
37754595 614 * Print @v to @sf for the device associated with @pd.
d3d32e69 615 */
f95a04af 616u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
d3d32e69 617{
f95a04af 618 const char *dname = blkg_dev_name(pd->blkg);
619
620 if (!dname)
621 return 0;
622
623 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
624 return v;
625}
829fdb50 626EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
d3d32e69 627
015d254c 628/**
22ae8ce8 629 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
630 * @inputp: input string pointer
631 *
632 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
22ae8ce8 633 * from @input and get and return the matching bdev. *@inputp is
634 * updated to point past the device node prefix. Returns an ERR_PTR()
635 * value on error.
636 *
637 * Use this function iff blkg_conf_prep() can't be used for some reason.
638 */
22ae8ce8 639struct block_device *blkcg_conf_open_bdev(char **inputp)
640{
641 char *input = *inputp;
642 unsigned int major, minor;
643 struct block_device *bdev;
644 int key_len;
645
646 if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
647 return ERR_PTR(-EINVAL);
648
649 input += key_len;
650 if (!isspace(*input))
651 return ERR_PTR(-EINVAL);
652 input = skip_spaces(input);
653
654 bdev = blkdev_get_no_open(MKDEV(major, minor));
655 if (!bdev)
015d254c 656 return ERR_PTR(-ENODEV);
657 if (bdev_is_partition(bdev)) {
658 blkdev_put_no_open(bdev);
659 return ERR_PTR(-ENODEV);
660 }
661
662 *inputp = input;
22ae8ce8 663 return bdev;
664}
665
666/**
667 * blkg_conf_prep - parse and prepare for per-blkg config update
668 * @blkcg: target block cgroup
da8b0662 669 * @pol: target policy
670 * @input: input string
671 * @ctx: blkg_conf_ctx to be filled
672 *
673 * Parse per-blkg config update from @input and initialize @ctx with the
674 * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
675 * part of @input following MAJ:MIN. This function returns with RCU read
676 * lock and queue lock held and must be paired with blkg_conf_finish().
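 *
 * A policy's config write handler typically uses this in a pattern like the
 * following sketch (error handling trimmed, and &blkcg_policy_foo standing in
 * for the policy's own struct blkcg_policy):
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... parse ctx.body and update ctx.blkg->pd[] under the held locks ...
 *	blkg_conf_finish(&ctx);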
3a8b31d3 677 */
3c798398 678int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
36aa9e5f 679 char *input, struct blkg_conf_ctx *ctx)
ed6cddef 680 __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
34d0f179 681{
22ae8ce8 682 struct block_device *bdev;
99e60387 683 struct gendisk *disk;
457e490f 684 struct request_queue *q;
3c798398 685 struct blkcg_gq *blkg;
015d254c 686 int ret;
36aa9e5f 687
688 bdev = blkcg_conf_open_bdev(&input);
689 if (IS_ERR(bdev))
690 return PTR_ERR(bdev);
691 disk = bdev->bd_disk;
692 q = disk->queue;
da8b0662 693
694 /*
695 * blkcg_deactivate_policy() requires the queue to be frozen. Grab
696 * q_usage_counter to avoid racing with blkcg_deactivate_policy().
697 */
698 ret = blk_queue_enter(q, 0);
699 if (ret)
15c30104 700 goto fail;
0c9d338c 701
457e490f 702 rcu_read_lock();
0d945c1f 703 spin_lock_irq(&q->queue_lock);
e56da7e2 704
705 if (!blkcg_policy_enabled(q, pol)) {
706 ret = -EOPNOTSUPP;
707 goto fail_unlock;
708 }
709
f753526e 710 blkg = blkg_lookup(blkcg, q);
5765033c 711 if (blkg)
712 goto success;
713
714 /*
715 * Create blkgs walking down from blkcg_root to @blkcg, so that all
716 * non-root blkgs have access to their parents.
717 */
718 while (true) {
719 struct blkcg *pos = blkcg;
720 struct blkcg *parent;
721 struct blkcg_gq *new_blkg;
722
723 parent = blkcg_parent(blkcg);
79fcc5be 724 while (parent && !blkg_lookup(parent, q)) {
725 pos = parent;
726 parent = blkcg_parent(parent);
727 }
728
729 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
0d945c1f 730 spin_unlock_irq(&q->queue_lock);
3a8b31d3 731 rcu_read_unlock();
457e490f 732
99e60387 733 new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
734 if (unlikely(!new_blkg)) {
735 ret = -ENOMEM;
15c30104 736 goto fail_exit_queue;
7702e8f4 737 }
3a8b31d3 738
739 if (radix_tree_preload(GFP_KERNEL)) {
740 blkg_free(new_blkg);
741 ret = -ENOMEM;
15c30104 742 goto fail_exit_queue;
743 }
744
457e490f 745 rcu_read_lock();
0d945c1f 746 spin_lock_irq(&q->queue_lock);
457e490f 747
f753526e 748 if (!blkcg_policy_enabled(q, pol)) {
52abfcbd 749 blkg_free(new_blkg);
f753526e 750 ret = -EOPNOTSUPP;
f255c19b 751 goto fail_preloaded;
457e490f
TE
752 }
753
f753526e 754 blkg = blkg_lookup(pos, q);
755 if (blkg) {
756 blkg_free(new_blkg);
757 } else {
99e60387 758 blkg = blkg_create(pos, disk, new_blkg);
98d669b4 759 if (IS_ERR(blkg)) {
457e490f 760 ret = PTR_ERR(blkg);
f255c19b 761 goto fail_preloaded;
762 }
763 }
764
765 radix_tree_preload_end();
766
767 if (pos == blkcg)
768 goto success;
769 }
770success:
0c9d338c 771 blk_queue_exit(q);
22ae8ce8 772 ctx->bdev = bdev;
3a8b31d3 773 ctx->blkg = blkg;
015d254c 774 ctx->body = input;
726fa694 775 return 0;
457e490f 776
777fail_preloaded:
778 radix_tree_preload_end();
457e490f 779fail_unlock:
0d945c1f 780 spin_unlock_irq(&q->queue_lock);
457e490f 781 rcu_read_unlock();
782fail_exit_queue:
783 blk_queue_exit(q);
457e490f 784fail:
22ae8ce8 785 blkdev_put_no_open(bdev);
786 /*
787 * If queue was bypassing, we should retry. Do so after a
788 * short msleep(). It isn't strictly necessary but queue
789 * can be bypassing for some time and it's always nice to
790 * avoid busy looping.
791 */
792 if (ret == -EBUSY) {
793 msleep(10);
794 ret = restart_syscall();
795 }
796 return ret;
34d0f179 797}
89f3b6d6 798EXPORT_SYMBOL_GPL(blkg_conf_prep);
34d0f179 799
800/**
801 * blkg_conf_finish - finish up per-blkg config update
37754595 802 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
803 *
804 * Finish up after per-blkg config update. This function must be paired
805 * with blkg_conf_prep().
806 */
829fdb50 807void blkg_conf_finish(struct blkg_conf_ctx *ctx)
ed6cddef 808 __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
34d0f179 809{
ed6cddef 810 spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
3a8b31d3 811 rcu_read_unlock();
22ae8ce8 812 blkdev_put_no_open(ctx->bdev);
34d0f179 813}
89f3b6d6 814EXPORT_SYMBOL_GPL(blkg_conf_finish);
34d0f179 815
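/* Plain helpers to copy, accumulate and subtract blkg_iostat counter sets. */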
816static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
817{
818 int i;
819
820 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
821 dst->bytes[i] = src->bytes[i];
822 dst->ios[i] = src->ios[i];
823 }
824}
825
826static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
827{
828 int i;
829
830 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
831 dst->bytes[i] += src->bytes[i];
832 dst->ios[i] += src->ios[i];
833 }
834}
835
836static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
837{
838 int i;
839
840 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
841 dst->bytes[i] -= src->bytes[i];
842 dst->ios[i] -= src->ios[i];
843 }
844}
845
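/*
 * Fold the delta between @cur and @last into @blkg's aggregated stats and
 * remember @cur in @last for the next flush.
 */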
846static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
847 struct blkg_iostat *last)
848{
849 struct blkg_iostat delta;
850 unsigned long flags;
851
852 /* propagate percpu delta to global */
853 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
854 blkg_iostat_set(&delta, cur);
855 blkg_iostat_sub(&delta, last);
856 blkg_iostat_add(&blkg->iostat.cur, &delta);
857 blkg_iostat_add(last, &delta);
858 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
859}
860
861static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
862{
863 struct blkcg *blkcg = css_to_blkcg(css);
864 struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
865 struct llist_node *lnode;
866 struct blkg_iostat_set *bisc, *next_bisc;
cd1fc4b9 867
868 /* Root-level stats are sourced from system-wide IO stats */
869 if (!cgroup_parent(css->cgroup))
870 return;
871
872 rcu_read_lock();
873
874 lnode = llist_del_all(lhead);
875 if (!lnode)
876 goto out;
877
878 /*
879 * Iterate only the iostat_cpu's queued in the lockless list.
880 */
881 llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
882 struct blkcg_gq *blkg = bisc->blkg;
cd1fc4b9 883 struct blkcg_gq *parent = blkg->parent;
362b8c16 884 struct blkg_iostat cur;
885 unsigned int seq;
886
887 WRITE_ONCE(bisc->lqueued, false);
888
889 /* fetch the current per-cpu values */
890 do {
891 seq = u64_stats_fetch_begin(&bisc->sync);
892 blkg_iostat_set(&cur, &bisc->cur);
893 } while (u64_stats_fetch_retry(&bisc->sync, seq));
894
362b8c16 895 blkcg_iostat_update(blkg, &cur, &bisc->last);
cd1fc4b9 896
dc26532a 897 /* propagate global delta to parent (unless that's root) */
898 if (parent && parent->parent)
899 blkcg_iostat_update(parent, &blkg->iostat.cur,
900 &blkg->iostat.last);
3b8cc629 901 percpu_ref_put(&blkg->refcnt);
902 }
903
3b8cc629 904out:
905 rcu_read_unlock();
906}
907
ef45fe47 908/*
909 * We source root cgroup stats from the system-wide stats to avoid
910 * tracking the same information twice and incurring overhead when no
911 * cgroups are defined. For that reason, cgroup_rstat_flush in
912 * blkcg_print_stat does not actually fill out the iostat in the root
913 * cgroup's blkcg_gq.
914 *
915 * However, we would like to re-use the printing code between the root and
916 * non-root cgroups to the extent possible. For that reason, we simulate
917 * flushing the root cgroup's stats by explicitly filling in the iostat
918 * with disk level statistics.
919 */
920static void blkcg_fill_root_iostats(void)
921{
922 struct class_dev_iter iter;
923 struct device *dev;
924
925 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
926 while ((dev = class_dev_iter_next(&iter))) {
0d02129e 927 struct block_device *bdev = dev_to_bdev(dev);
928f6f00 928 struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
929 struct blkg_iostat tmp;
930 int cpu;
f122d103 931 unsigned long flags;
932
933 memset(&tmp, 0, sizeof(tmp));
934 for_each_possible_cpu(cpu) {
935 struct disk_stats *cpu_dkstats;
936
0d02129e 937 cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
938 tmp.ios[BLKG_IOSTAT_READ] +=
939 cpu_dkstats->ios[STAT_READ];
940 tmp.ios[BLKG_IOSTAT_WRITE] +=
941 cpu_dkstats->ios[STAT_WRITE];
942 tmp.ios[BLKG_IOSTAT_DISCARD] +=
943 cpu_dkstats->ios[STAT_DISCARD];
944 // convert sectors to bytes
945 tmp.bytes[BLKG_IOSTAT_READ] +=
946 cpu_dkstats->sectors[STAT_READ] << 9;
947 tmp.bytes[BLKG_IOSTAT_WRITE] +=
948 cpu_dkstats->sectors[STAT_WRITE] << 9;
949 tmp.bytes[BLKG_IOSTAT_DISCARD] +=
950 cpu_dkstats->sectors[STAT_DISCARD] << 9;
ef45fe47 951 }
952
953 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
954 blkg_iostat_set(&blkg->iostat.cur, &tmp);
955 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
956 }
957}
958
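/*
 * Print one io.stat line for @blkg into @s: device name, read/write/discard
 * byte and io counts, optional debug info and any policy-specific stats.
 */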
49cb5168 959static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
2ee867dc 960{
961 struct blkg_iostat_set *bis = &blkg->iostat;
962 u64 rbytes, wbytes, rios, wios, dbytes, dios;
963 const char *dname;
964 unsigned seq;
49cb5168 965 int i;
ef45fe47 966
967 if (!blkg->online)
968 return;
2ee867dc 969
970 dname = blkg_dev_name(blkg);
971 if (!dname)
972 return;
2ee867dc 973
252c651a 974 seq_printf(s, "%s ", dname);
b0814361 975
49cb5168
CH
976 do {
977 seq = u64_stats_fetch_begin(&bis->sync);
b0814361 978
979 rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
980 wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
981 dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
982 rios = bis->cur.ios[BLKG_IOSTAT_READ];
983 wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
984 dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
985 } while (u64_stats_fetch_retry(&bis->sync, seq));
2ee867dc 986
49cb5168 987 if (rbytes || wbytes || rios || wios) {
252c651a 988 seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
989 rbytes, wbytes, rios, wios,
990 dbytes, dios);
991 }
903d23f0 992
49cb5168 993 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
252c651a 994 seq_printf(s, " use_delay=%d delay_nsec=%llu",
995 atomic_read(&blkg->use_delay),
996 atomic64_read(&blkg->delay_nsec));
997 }
2ee867dc 998
999 for (i = 0; i < BLKCG_MAX_POLS; i++) {
1000 struct blkcg_policy *pol = blkcg_policy[i];
2ee867dc 1001
1002 if (!blkg->pd[i] || !pol->pd_stat_fn)
1003 continue;
903d23f0 1004
3607849d 1005 pol->pd_stat_fn(blkg->pd[i], s);
49cb5168 1006 }
d09d8df3 1007
3607849d 1008 seq_puts(s, "\n");
49cb5168 1009}
903d23f0 1010
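/*
 * Seq-file show handler for io.stat: flush rstat (or fill in root stats from
 * the system-wide counters) and print one line per blkg of @blkcg.
 */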
1011static int blkcg_print_stat(struct seq_file *sf, void *v)
1012{
1013 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1014 struct blkcg_gq *blkg;
903d23f0 1015
1016 if (!seq_css(sf)->parent)
1017 blkcg_fill_root_iostats();
1018 else
1019 cgroup_rstat_flush(blkcg->css.cgroup);
07b0fdec 1020
1021 rcu_read_lock();
1022 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
1023 spin_lock_irq(&blkg->q->queue_lock);
1024 blkcg_print_one_stat(blkg, sf);
b0814361 1025 spin_unlock_irq(&blkg->q->queue_lock);
2ee867dc 1026 }
1027 rcu_read_unlock();
1028 return 0;
1029}
1030
e1f3b941 1031static struct cftype blkcg_files[] = {
1032 {
1033 .name = "stat",
1034 .seq_show = blkcg_print_stat,
1035 },
1036 { } /* terminate */
1037};
1038
e1f3b941 1039static struct cftype blkcg_legacy_files[] = {
1040 {
1041 .name = "reset_stats",
3c798398 1042 .write_u64 = blkcg_reset_stats,
22084190 1043 },
4baf6e33 1044 { } /* terminate */
1045};
1046
1047#ifdef CONFIG_CGROUP_WRITEBACK
1048struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
1049{
1050 return &css_to_blkcg(css)->cgwb_list;
1051}
1052#endif
1053
1054/*
1055 * blkcg destruction is a three-stage process.
1056 *
1057 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
1058 * which offlines writeback. Here we tie the next stage of blkg destruction
1059 * to the completion of writeback associated with the blkcg. This lets us
1060 * avoid punting potentially large amounts of outstanding writeback to root
1061 * while maintaining any ongoing policies. The next stage is triggered when
1062 * the nr_cgwbs count goes to zero.
1063 *
1064 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1065 * and handles the destruction of blkgs. Here the css reference held by
1066 * the blkg is put back eventually allowing blkcg_css_free() to be called.
1067 * This work may occur in cgwb_release_workfn() on the cgwb_release
1068 * workqueue. Any submitted ios that fail to get the blkg ref will be
1069 * punted to the root_blkg.
1070 *
1071 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1072 * This finally frees the blkcg.
1073 */
1074
1075/**
1076 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1077 * @blkcg: blkcg of interest
1078 *
1079 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1080 * is nested inside q lock, this function performs reverse double lock dancing.
1081 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1082 * blkcg_css_free to eventually be called.
1083 *
1084 * This is the blkcg counterpart of ioc_release_fn().
1085 */
397c9f46 1086static void blkcg_destroy_blkgs(struct blkcg *blkcg)
59b57717 1087{
1088 might_sleep();
1089
9f13ef67 1090 spin_lock_irq(&blkcg->lock);
7ee9c562 1091
1092 while (!hlist_empty(&blkcg->blkg_list)) {
1093 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
6b065462 1094 struct blkcg_gq, blkcg_node);
1095 struct request_queue *q = blkg->q;
1096
1097 if (need_resched() || !spin_trylock(&q->queue_lock)) {
1098 /*
1099 * Given that the system can accumulate a huge number
1100 * of blkgs in pathological cases, check to see if we
1101 * need to rescheduling to avoid softlockup.
1102 */
4c699480 1103 spin_unlock_irq(&blkcg->lock);
6c635cae 1104 cond_resched();
4c699480 1105 spin_lock_irq(&blkcg->lock);
6c635cae 1106 continue;
4c699480 1107 }
1108
1109 blkg_destroy(blkg);
1110 spin_unlock(&q->queue_lock);
4c699480 1111 }
6b065462 1112
1113 spin_unlock_irq(&blkcg->lock);
1114}
1115
1116/**
1117 * blkcg_pin_online - pin online state
1118 * @blkcg_css: blkcg of interest
1119 *
1120 * While pinned, a blkcg is kept online. This is primarily used to
1121 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
1122 * while an associated cgwb is still active.
1123 */
1124void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
1125{
1126 refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
1127}
1128
1129/**
1130 * blkcg_unpin_online - unpin online state
1131 * @blkcg_css: blkcg of interest
1132 *
1133 * This is primarily used to impedance-match blkg and cgwb lifetimes so
1134 * that blkg doesn't go offline while an associated cgwb is still active.
1135 * When this count goes to zero, all active cgwbs have finished so the
1136 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
1137 */
1138void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
1139{
1140 struct blkcg *blkcg = css_to_blkcg(blkcg_css);
1141
1142 do {
1143 if (!refcount_dec_and_test(&blkcg->online_pin))
1144 break;
1145 blkcg_destroy_blkgs(blkcg);
1146 blkcg = blkcg_parent(blkcg);
1147 } while (blkcg);
1148}
1149
1150/**
1151 * blkcg_css_offline - cgroup css_offline callback
1152 * @css: css of interest
1153 *
1154 * This function is called when @css is about to go away. Here the cgwbs are
1155 * offlined first and only once writeback associated with the blkcg has
1156 * finished do we start step 2 (see above).
1157 */
1158static void blkcg_css_offline(struct cgroup_subsys_state *css)
1159{
1160 /* this prevents anyone from attaching or migrating to this blkcg */
dec223c9 1161 wb_blkcg_offline(css);
1162
1163 /* put the base online pin allowing step 2 to be triggered */
1164 blkcg_unpin_online(css);
1165}
1166
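/* css_free callback: release per-policy cpd's, the per-cpu llist heads and the blkcg itself. */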
eb95419b 1167static void blkcg_css_free(struct cgroup_subsys_state *css)
7ee9c562 1168{
eb95419b 1169 struct blkcg *blkcg = css_to_blkcg(css);
bc915e61 1170 int i;
7ee9c562 1171
7876f930 1172 mutex_lock(&blkcg_pol_mutex);
e4a9bde9 1173
7876f930 1174 list_del(&blkcg->all_blkcgs_node);
7876f930 1175
bc915e61 1176 for (i = 0; i < BLKCG_MAX_POLS; i++)
1177 if (blkcg->cpd[i])
1178 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1179
1180 mutex_unlock(&blkcg_pol_mutex);
1181
3b8cc629 1182 free_percpu(blkcg->lhead);
bc915e61 1183 kfree(blkcg);
1184}
1185
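/*
 * css_alloc callback: allocate and initialize a blkcg, including per-policy
 * cpd's. The statically allocated blkcg_root is used for the root cgroup.
 */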
1186static struct cgroup_subsys_state *
1187blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
31e4c28d 1188{
3c798398 1189 struct blkcg *blkcg;
e48453c3 1190 int i;
31e4c28d 1191
1192 mutex_lock(&blkcg_pol_mutex);
1193
eb95419b 1194 if (!parent_css) {
3c798398 1195 blkcg = &blkcg_root;
1196 } else {
1197 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
b5a9adcb 1198 if (!blkcg)
4c18c9e9 1199 goto unlock;
1200 }
1201
1202 if (init_blkcg_llists(blkcg))
1203 goto free_blkcg;
1204
1205 for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1206 struct blkcg_policy *pol = blkcg_policy[i];
1207 struct blkcg_policy_data *cpd;
1208
1209 /*
1210 * If the policy hasn't been attached yet, wait for it
1211 * to be attached before doing anything else. Otherwise,
1212 * check if the policy requires any specific per-cgroup
1213 * data: if it does, allocate and initialize it.
1214 */
e4a9bde9 1215 if (!pol || !pol->cpd_alloc_fn)
1216 continue;
1217
e4a9bde9 1218 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
b5a9adcb 1219 if (!cpd)
e48453c3 1220 goto free_pd_blkcg;
b5a9adcb 1221
1222 blkcg->cpd[i] = cpd;
1223 cpd->blkcg = blkcg;
e48453c3 1224 cpd->plid = i;
1225 if (pol->cpd_init_fn)
1226 pol->cpd_init_fn(cpd);
e48453c3 1227 }
31e4c28d 1228
31e4c28d 1229 spin_lock_init(&blkcg->lock);
d866dbf6 1230 refcount_set(&blkcg->online_pin, 1);
e00f4f4d 1231 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
31e4c28d 1232 INIT_HLIST_HEAD(&blkcg->blkg_list);
1233#ifdef CONFIG_CGROUP_WRITEBACK
1234 INIT_LIST_HEAD(&blkcg->cgwb_list);
1235#endif
1236 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1237
1238 mutex_unlock(&blkcg_pol_mutex);
31e4c28d 1239 return &blkcg->css;
1240
1241free_pd_blkcg:
1242 for (i--; i >= 0; i--)
1243 if (blkcg->cpd[i])
1244 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1245 free_percpu(blkcg->lhead);
1246free_blkcg:
4c18c9e9 1247 if (blkcg != &blkcg_root)
1248 kfree(blkcg);
1249unlock:
7876f930 1250 mutex_unlock(&blkcg_pol_mutex);
b5a9adcb 1251 return ERR_PTR(-ENOMEM);
1252}
1253
1254static int blkcg_css_online(struct cgroup_subsys_state *css)
1255{
397c9f46 1256 struct blkcg *parent = blkcg_parent(css_to_blkcg(css));
1257
1258 /*
1259 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1260 * don't go offline while cgwbs are still active on them. Pin the
1261 * parent so that offline always happens towards the root.
1262 */
1263 if (parent)
d7dbd43f 1264 blkcg_pin_online(&parent->css);
1265 return 0;
1266}
1267
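/*
 * Set up blkcg state for a newly added gendisk: create the root blkg and
 * initialize the ioprio, throttle and iolatency infrastructure.
 */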
9823538f 1268int blkcg_init_disk(struct gendisk *disk)
5efd6113 1269{
9823538f 1270 struct request_queue *q = disk->queue;
1271 struct blkcg_gq *new_blkg, *blkg;
1272 bool preloaded;
1273 int ret;
1274
1275 INIT_LIST_HEAD(&q->blkg_list);
1276
99e60387 1277 new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
1278 if (!new_blkg)
1279 return -ENOMEM;
1280
1281 preloaded = !radix_tree_preload(GFP_KERNEL);
1282
bea54883 1283 /* Make sure the root blkg exists. */
77c570a1 1284 /* spin_lock_irq can serve as RCU read-side critical section. */
0d945c1f 1285 spin_lock_irq(&q->queue_lock);
99e60387 1286 blkg = blkg_create(&blkcg_root, disk, new_blkg);
1287 if (IS_ERR(blkg))
1288 goto err_unlock;
1289 q->root_blkg = blkg;
0d945c1f 1290 spin_unlock_irq(&q->queue_lock);
ec13b1d6 1291
1292 if (preloaded)
1293 radix_tree_preload_end();
1294
b0dde3f5 1295 ret = blk_ioprio_init(disk);
1296 if (ret)
1297 goto err_destroy_all;
1298
e13793ba 1299 ret = blk_throtl_init(disk);
19688d7f 1300 if (ret)
33dc6279 1301 goto err_ioprio_exit;
19688d7f 1302
16fac1b5 1303 ret = blk_iolatency_init(disk);
1304 if (ret)
1305 goto err_throtl_exit;
6f5ddde4 1306
04be60b5 1307 return 0;
901932a3 1308
33dc6279 1309err_throtl_exit:
e13793ba 1310 blk_throtl_exit(disk);
33dc6279 1311err_ioprio_exit:
b0dde3f5 1312 blk_ioprio_exit(disk);
04be60b5 1313err_destroy_all:
00ad6991 1314 blkg_destroy_all(disk);
04be60b5 1315 return ret;
901932a3 1316err_unlock:
0d945c1f 1317 spin_unlock_irq(&q->queue_lock);
1318 if (preloaded)
1319 radix_tree_preload_end();
1320 return PTR_ERR(blkg);
1321}
1322
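/* Tear down blkcg state for @disk: destroy all blkgs and shut down rq-qos and throttling. */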
9823538f 1323void blkcg_exit_disk(struct gendisk *disk)
5efd6113 1324{
00ad6991 1325 blkg_destroy_all(disk);
813e6930 1326 rq_qos_exit(disk->queue);
e13793ba 1327 blk_throtl_exit(disk);
1328}
1329
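/*
 * cgroup "bind" callback: calls each policy's cpd_bind_fn for every blkcg so
 * it can refresh its per-cgroup data.
 */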
1330static void blkcg_bind(struct cgroup_subsys_state *root_css)
1331{
1332 int i;
1333
1334 mutex_lock(&blkcg_pol_mutex);
1335
1336 for (i = 0; i < BLKCG_MAX_POLS; i++) {
1337 struct blkcg_policy *pol = blkcg_policy[i];
1338 struct blkcg *blkcg;
1339
1340 if (!pol || !pol->cpd_bind_fn)
1341 continue;
1342
1343 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1344 if (blkcg->cpd[pol->plid])
1345 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1346 }
1347 mutex_unlock(&blkcg_pol_mutex);
1348}
1349
1350static void blkcg_exit(struct task_struct *tsk)
1351{
1352 if (tsk->throttle_queue)
1353 blk_put_queue(tsk->throttle_queue);
1354 tsk->throttle_queue = NULL;
1355}
1356
c165b3e3 1357struct cgroup_subsys io_cgrp_subsys = {
92fb9748 1358 .css_alloc = blkcg_css_alloc,
4308a434 1359 .css_online = blkcg_css_online,
1360 .css_offline = blkcg_css_offline,
1361 .css_free = blkcg_css_free,
f7331648 1362 .css_rstat_flush = blkcg_rstat_flush,
69d7fde5 1363 .bind = blkcg_bind,
2ee867dc 1364 .dfl_cftypes = blkcg_files,
880f50e2 1365 .legacy_cftypes = blkcg_legacy_files,
c165b3e3 1366 .legacy_name = "blkio",
d09d8df3 1367 .exit = blkcg_exit,
1368#ifdef CONFIG_MEMCG
1369 /*
1370 * This ensures that, if available, memcg is automatically enabled
1371 * together on the default hierarchy so that the owner cgroup can
1372 * be retrieved from writeback pages.
1373 */
1374 .depends_on = 1 << memory_cgrp_id,
1375#endif
676f7c8f 1376};
c165b3e3 1377EXPORT_SYMBOL_GPL(io_cgrp_subsys);
676f7c8f 1378
1379/**
1380 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1381 * @q: request_queue of interest
1382 * @pol: blkcg policy to activate
1383 *
1384 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
1385 * bypass mode to populate its blkgs with policy_data for @pol.
1386 *
1387 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1388 * from IO path. Update of each blkg is protected by both queue and blkcg
1389 * locks so that holding either lock and testing blkcg_policy_enabled() is
1390 * always enough for dereferencing policy data.
1391 *
1392 * The caller is responsible for synchronizing [de]activations and policy
1393 * [un]registerations. Returns 0 on success, -errno on failure.
1394 */
1395int blkcg_activate_policy(struct request_queue *q,
3c798398 1396 const struct blkcg_policy *pol)
a2b1693b 1397{
4c55f4f9 1398 struct blkg_policy_data *pd_prealloc = NULL;
9d179b86 1399 struct blkcg_gq *blkg, *pinned_blkg = NULL;
4c55f4f9 1400 int ret;
1401
1402 if (blkcg_policy_enabled(q, pol))
1403 return 0;
1404
344e9ffc 1405 if (queue_is_mq(q))
bd166ef1 1406 blk_mq_freeze_queue(q);
9d179b86 1407retry:
0d945c1f 1408 spin_lock_irq(&q->queue_lock);
a2b1693b 1409
9d179b86 1410 /* blkg_list is pushed at the head, reverse walk to allocate parents first */
71c81407 1411 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1412 struct blkg_policy_data *pd;
1413
1414 if (blkg->pd[pol->plid])
1415 continue;
a2b1693b 1416
1417 /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
1418 if (blkg == pinned_blkg) {
1419 pd = pd_prealloc;
1420 pd_prealloc = NULL;
1421 } else {
1422 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
1423 blkg->blkcg);
1424 }
1425
4c55f4f9 1426 if (!pd) {
1427 /*
1428 * GFP_NOWAIT failed. Free the existing one and
1429 * prealloc for @blkg w/ GFP_KERNEL.
1430 */
1431 if (pinned_blkg)
1432 blkg_put(pinned_blkg);
1433 blkg_get(blkg);
1434 pinned_blkg = blkg;
1435
0d945c1f 1436 spin_unlock_irq(&q->queue_lock);
1437
1438 if (pd_prealloc)
1439 pol->pd_free_fn(pd_prealloc);
1440 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
1441 blkg->blkcg);
1442 if (pd_prealloc)
1443 goto retry;
1444 else
1445 goto enomem;
4c55f4f9 1446 }
1447
1448 blkg->pd[pol->plid] = pd;
1449 pd->blkg = blkg;
b276a876 1450 pd->plid = pol->plid;
1451 }
1452
1453 /* all allocated, init in the same order */
1454 if (pol->pd_init_fn)
1455 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1456 pol->pd_init_fn(blkg->pd[pol->plid]);
1457
1458 if (pol->pd_online_fn)
1459 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1460 pol->pd_online_fn(blkg->pd[pol->plid]);
1461
1462 __set_bit(pol->plid, q->blkcg_pols);
1463 ret = 0;
4c55f4f9 1464
0d945c1f 1465 spin_unlock_irq(&q->queue_lock);
9d179b86 1466out:
344e9ffc 1467 if (queue_is_mq(q))
bd166ef1 1468 blk_mq_unfreeze_queue(q);
1469 if (pinned_blkg)
1470 blkg_put(pinned_blkg);
1471 if (pd_prealloc)
1472 pol->pd_free_fn(pd_prealloc);
a2b1693b 1473 return ret;
1474
1475enomem:
1476 /* alloc failed, nothing's initialized yet, free everything */
1477 spin_lock_irq(&q->queue_lock);
1478 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1479 struct blkcg *blkcg = blkg->blkcg;
1480
1481 spin_lock(&blkcg->lock);
1482 if (blkg->pd[pol->plid]) {
1483 pol->pd_free_fn(blkg->pd[pol->plid]);
1484 blkg->pd[pol->plid] = NULL;
1485 }
858560b2 1486 spin_unlock(&blkcg->lock);
1487 }
1488 spin_unlock_irq(&q->queue_lock);
1489 ret = -ENOMEM;
1490 goto out;
1491}
1492EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1493
1494/**
1495 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1496 * @q: request_queue of interest
1497 * @pol: blkcg policy to deactivate
1498 *
1499 * Deactivate @pol on @q. Follows the same synchronization rules as
1500 * blkcg_activate_policy().
1501 */
1502void blkcg_deactivate_policy(struct request_queue *q,
3c798398 1503 const struct blkcg_policy *pol)
a2b1693b 1504{
3c798398 1505 struct blkcg_gq *blkg;
1506
1507 if (!blkcg_policy_enabled(q, pol))
1508 return;
1509
344e9ffc 1510 if (queue_is_mq(q))
bd166ef1 1511 blk_mq_freeze_queue(q);
bd166ef1 1512
0d945c1f 1513 spin_lock_irq(&q->queue_lock);
1514
1515 __clear_bit(pol->plid, q->blkcg_pols);
1516
1517 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1518 struct blkcg *blkcg = blkg->blkcg;
1519
1520 spin_lock(&blkcg->lock);
001bea73 1521 if (blkg->pd[pol->plid]) {
6b065462 1522 if (pol->pd_offline_fn)
a9520cd6 1523 pol->pd_offline_fn(blkg->pd[pol->plid]);
1524 pol->pd_free_fn(blkg->pd[pol->plid]);
1525 blkg->pd[pol->plid] = NULL;
1526 }
858560b2 1527 spin_unlock(&blkcg->lock);
1528 }
1529
0d945c1f 1530 spin_unlock_irq(&q->queue_lock);
bd166ef1 1531
344e9ffc 1532 if (queue_is_mq(q))
bd166ef1 1533 blk_mq_unfreeze_queue(q);
1534}
1535EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1536
1537static void blkcg_free_all_cpd(struct blkcg_policy *pol)
1538{
1539 struct blkcg *blkcg;
1540
1541 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1542 if (blkcg->cpd[pol->plid]) {
1543 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1544 blkcg->cpd[pol->plid] = NULL;
1545 }
1546 }
1547}
1548
8bd435b3 1549/**
1550 * blkcg_policy_register - register a blkcg policy
1551 * @pol: blkcg policy to register
8bd435b3 1552 *
1553 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1554 * successful registration. Returns 0 on success and -errno on failure.
8bd435b3 1555 */
d5bf0291 1556int blkcg_policy_register(struct blkcg_policy *pol)
3e252066 1557{
06b285bd 1558 struct blkcg *blkcg;
8bd435b3 1559 int i, ret;
e8989fae 1560
838f13bf 1561 mutex_lock(&blkcg_pol_register_mutex);
1562 mutex_lock(&blkcg_pol_mutex);
1563
1564 /* find an empty slot */
1565 ret = -ENOSPC;
1566 for (i = 0; i < BLKCG_MAX_POLS; i++)
3c798398 1567 if (!blkcg_policy[i])
8bd435b3 1568 break;
1569 if (i >= BLKCG_MAX_POLS) {
1570 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
838f13bf 1571 goto err_unlock;
01c5f85a 1572 }
035d10b2 1573
e8401073 1574 /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
1575 if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1576 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1577 goto err_unlock;
1578
06b285bd 1579 /* register @pol */
3c798398 1580 pol->plid = i;
1581 blkcg_policy[pol->plid] = pol;
1582
1583 /* allocate and install cpd's */
e4a9bde9 1584 if (pol->cpd_alloc_fn) {
1585 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1586 struct blkcg_policy_data *cpd;
1587
e4a9bde9 1588 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
bbb427e3 1589 if (!cpd)
06b285bd 1590 goto err_free_cpds;
06b285bd 1591
1592 blkcg->cpd[pol->plid] = cpd;
1593 cpd->blkcg = blkcg;
06b285bd 1594 cpd->plid = pol->plid;
1595 if (pol->cpd_init_fn)
1596 pol->cpd_init_fn(cpd);
1597 }
1598 }
1599
838f13bf 1600 mutex_unlock(&blkcg_pol_mutex);
8bd435b3 1601
8bd435b3 1602 /* everything is in place, add intf files for the new policy */
1603 if (pol->dfl_cftypes)
1604 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1605 pol->dfl_cftypes));
880f50e2 1606 if (pol->legacy_cftypes)
c165b3e3 1607 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
880f50e2 1608 pol->legacy_cftypes));
1609 mutex_unlock(&blkcg_pol_register_mutex);
1610 return 0;
1611
06b285bd 1612err_free_cpds:
1613 if (pol->cpd_free_fn)
1614 blkcg_free_all_cpd(pol);
1615
06b285bd 1616 blkcg_policy[pol->plid] = NULL;
838f13bf 1617err_unlock:
bc0d6501 1618 mutex_unlock(&blkcg_pol_mutex);
838f13bf 1619 mutex_unlock(&blkcg_pol_register_mutex);
8bd435b3 1620 return ret;
3e252066 1621}
3c798398 1622EXPORT_SYMBOL_GPL(blkcg_policy_register);
3e252066 1623
8bd435b3 1624/**
1625 * blkcg_policy_unregister - unregister a blkcg policy
1626 * @pol: blkcg policy to unregister
8bd435b3 1627 *
3c798398 1628 * Undo blkcg_policy_register(@pol). Might sleep.
8bd435b3 1629 */
3c798398 1630void blkcg_policy_unregister(struct blkcg_policy *pol)
3e252066 1631{
838f13bf 1632 mutex_lock(&blkcg_pol_register_mutex);
bc0d6501 1633
3c798398 1634 if (WARN_ON(blkcg_policy[pol->plid] != pol))
1635 goto out_unlock;
1636
1637 /* kill the intf files first */
1638 if (pol->dfl_cftypes)
1639 cgroup_rm_cftypes(pol->dfl_cftypes);
1640 if (pol->legacy_cftypes)
1641 cgroup_rm_cftypes(pol->legacy_cftypes);
44ea53de 1642
06b285bd 1643 /* remove cpds and unregister */
838f13bf 1644 mutex_lock(&blkcg_pol_mutex);
06b285bd 1645
1646 if (pol->cpd_free_fn)
1647 blkcg_free_all_cpd(pol);
1648
3c798398 1649 blkcg_policy[pol->plid] = NULL;
06b285bd 1650
bc0d6501 1651 mutex_unlock(&blkcg_pol_mutex);
1652out_unlock:
1653 mutex_unlock(&blkcg_pol_register_mutex);
3e252066 1654}
3c798398 1655EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
903d23f0 1656
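/*
 * Punt a REQ_CGROUP_PUNT bio to the blkg's async worker so that it is
 * submitted from process context. Returns %false for the root cgroup, in
 * which case the caller submits the bio directly.
 */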
1657bool __blkcg_punt_bio_submit(struct bio *bio)
1658{
1659 struct blkcg_gq *blkg = bio->bi_blkg;
1660
1661 /* consume the flag first */
1662 bio->bi_opf &= ~REQ_CGROUP_PUNT;
1663
1664 /* never bounce for the root cgroup */
1665 if (!blkg->parent)
1666 return false;
1667
1668 spin_lock_bh(&blkg->async_bio_lock);
1669 bio_list_add(&blkg->async_bios, bio);
1670 spin_unlock_bh(&blkg->async_bio_lock);
1671
1672 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1673 return true;
1674}
1675
1676/*
1677 * Scale the accumulated delay based on how long it has been since we updated
1678 * the delay. We only call this when we are adding delay, in case it's been a
1679 * while since we added delay, and when we are checking to see if we need to
1680 * delay a task, to account for any delays that may have occurred.
1681 */
1682static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1683{
1684 u64 old = atomic64_read(&blkg->delay_start);
1685
1686 /* negative use_delay means no scaling, see blkcg_set_delay() */
1687 if (atomic_read(&blkg->use_delay) < 0)
1688 return;
1689
1690 /*
1691 * We only want to scale down every second. The idea here is that we
1692 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1693 * time window. We only want to throttle tasks for delay that occurred
1694 * recently, in 1-second windows, since that's the maximum time for
1695 * which things can be throttled. We save the current delay window in
1696 * blkg->last_delay so we know what amount is still left to be charged
1697 * to the blkg from this point onward. blkg->last_use keeps track of
1698 * the use_delay counter. The idea is that if we're unthrottling the
1699 * blkg, we're OK with whatever is happening now, and we can take away
1700 * more of the accumulated delay as we've already throttled enough that
1701 * everybody is happy with their IO latencies.
1702 */
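	/*
	 * Example: once a second has elapsed since delay_start, up to
	 * last_delay (but no more than the elapsed time) is subtracted from
	 * delay_nsec; if use_delay dropped since the last pass, at least
	 * half of last_delay is subtracted regardless.
	 */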
1703 if (time_before64(old + NSEC_PER_SEC, now) &&
96388f57 1704 atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
d09d8df3
JB
1705 u64 cur = atomic64_read(&blkg->delay_nsec);
1706 u64 sub = min_t(u64, blkg->last_delay, now - old);
1707 int cur_use = atomic_read(&blkg->use_delay);
1708
1709 /*
1710 * We've been unthrottled, subtract a larger chunk of our
1711 * accumulated delay.
1712 */
1713 if (cur_use < blkg->last_use)
1714 sub = max_t(u64, sub, blkg->last_delay >> 1);
1715
1716 /*
1717 * This shouldn't happen, but handle it anyway. Our delay_nsec
1718 * should only ever be growing except here where we subtract out
1719 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1720 * rather not end up with negative numbers.
1721 */
1722 if (unlikely(cur < sub)) {
1723 atomic64_set(&blkg->delay_nsec, 0);
1724 blkg->last_delay = 0;
1725 } else {
1726 atomic64_sub(sub, &blkg->delay_nsec);
1727 blkg->last_delay = cur - sub;
1728 }
1729 blkg->last_use = cur_use;
1730 }
1731}
1732
1733/*
1734 * This is called when we want to actually walk up the hierarchy and check to
1735 * see if we need to throttle, and then actually throttle if there is some
1736 * accumulated delay. This should only be called upon return to user space so
1737 * we're not holding some lock that would induce a priority inversion.
1738 */
1739static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1740{
fd112c74 1741 unsigned long pflags;
5160a5a5 1742 bool clamp;
d09d8df3
JB
1743 u64 now = ktime_to_ns(ktime_get());
1744 u64 exp;
1745 u64 delay_nsec = 0;
1746 int tok;
1747
1748 while (blkg->parent) {
5160a5a5
TH
1749 int use_delay = atomic_read(&blkg->use_delay);
1750
1751 if (use_delay) {
1752 u64 this_delay;
1753
d09d8df3 1754 blkcg_scale_delay(blkg, now);
5160a5a5
TH
1755 this_delay = atomic64_read(&blkg->delay_nsec);
1756 if (this_delay > delay_nsec) {
1757 delay_nsec = this_delay;
1758 clamp = use_delay > 0;
1759 }
d09d8df3
JB
1760 }
1761 blkg = blkg->parent;
1762 }
1763
1764 if (!delay_nsec)
1765 return;
1766
1767 /*
1768 * Let's not sleep for all eternity if we've amassed a huge delay.
1769 * Swapping or metadata IO can accumulate tens of seconds worth of
1770 * delay, and we want userspace to be able to do _something_, so cap the
5160a5a5
TH
1771 * delays at 0.25s. If there are tens of seconds worth of delay then the
1772 * tasks will be delayed by 0.25 seconds for every syscall. If
1773 * blkcg_set_delay() was used as indicated by negative use_delay, the
1774 * caller is responsible for regulating the range.
d09d8df3 1775 */
5160a5a5
TH
1776 if (clamp)
1777 delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
d09d8df3 1778
fd112c74
JB
1779 if (use_memdelay)
1780 psi_memstall_enter(&pflags);
d09d8df3
JB
1781
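	/*
	 * Sleep in TASK_KILLABLE until the computed deadline, re-sleeping if
	 * woken early by anything other than a fatal signal.
	 */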
1782 exp = ktime_add_ns(now, delay_nsec);
1783 tok = io_schedule_prepare();
1784 do {
1785 __set_current_state(TASK_KILLABLE);
1786 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1787 break;
1788 } while (!fatal_signal_pending(current));
1789 io_schedule_finish(tok);
fd112c74
JB
1790
1791 if (use_memdelay)
1792 psi_memstall_leave(&pflags);
d09d8df3
JB
1793}
1794
1795/**
1796 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1797 *
1798 * This is only called if we've been marked with set_notify_resume(). Obviously
1799 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1800 * check to see if current->throttle_queue is set; if not, this does nothing.
1801 * This should only ever be called by the resume code; it's not meant to be
1802 * called arbitrarily, as it will actually do the work to throttle the task if
1803 * it is set up for throttling.
1804 */
1805void blkcg_maybe_throttle_current(void)
1806{
1807 struct request_queue *q = current->throttle_queue;
d09d8df3
JB
1808 struct blkcg *blkcg;
1809 struct blkcg_gq *blkg;
1810 bool use_memdelay = current->use_memdelay;
1811
1812 if (!q)
1813 return;
1814
1815 current->throttle_queue = NULL;
1816 current->use_memdelay = false;
1817
1818 rcu_read_lock();
82778259 1819 blkcg = css_to_blkcg(blkcg_css());
d09d8df3
JB
1820 if (!blkcg)
1821 goto out;
1822 blkg = blkg_lookup(blkcg, q);
1823 if (!blkg)
1824 goto out;
7754f669 1825 if (!blkg_tryget(blkg))
d09d8df3
JB
1826 goto out;
1827 rcu_read_unlock();
d09d8df3
JB
1828
1829 blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1830 blkg_put(blkg);
cc7ecc25 1831 blk_put_queue(q);
d09d8df3
JB
1832 return;
1833out:
1834 rcu_read_unlock();
1835 blk_put_queue(q);
1836}
d09d8df3
JB
1837
1838/**
1839 * blkcg_schedule_throttle - this task needs to check for throttling
1d6df9d3 1840 * @disk: disk to throttle
537d71b3 1841 * @use_memdelay: do we charge this to memory delay for PSI
d09d8df3
JB
1842 *
1843 * This is called by the IO controller when we know there's delay accumulated
1844 * for the blkg for this task. We do not pass the blkg because there are places
1845 * we call this that may not have that information; the swapping code, for
de185b56 1846 * instance, will only have a block_device at that point. This sets the
d09d8df3
JB
1847 * notify_resume for the task to check and see if it requires throttling before
1848 * returning to user space.
1849 *
1850 * We will only schedule once per syscall. You can call this over and over
1851 * again and it will only do the check once upon return to user space, and only
1852 * throttle once. If the task needs to be throttled again, it'll need to be
1853 * re-set the next time we see the task.
1854 */
de185b56 1855void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
d09d8df3 1856{
de185b56
CH
1857 struct request_queue *q = disk->queue;
1858
d09d8df3
JB
1859 if (unlikely(current->flags & PF_KTHREAD))
1860 return;
1861
49d1822b
CX
1862 if (current->throttle_queue != q) {
1863 if (!blk_get_queue(q))
1864 return;
1865
1866 if (current->throttle_queue)
1867 blk_put_queue(current->throttle_queue);
1868 current->throttle_queue = q;
1869 }
d09d8df3 1870
d09d8df3
JB
1871 if (use_memdelay)
1872 current->use_memdelay = use_memdelay;
1873 set_notify_resume(current);
1874}
d09d8df3
JB
1875
1876/**
1877 * blkcg_add_delay - add delay to this blkg
537d71b3
BVA
1878 * @blkg: blkg of interest
1879 * @now: the current time in nanoseconds
1880 * @delta: how many nanoseconds of delay to add
d09d8df3
JB
1881 *
1882 * Charge @delta to the blkg's current delay accumulation. This is used to
1883 * throttle tasks if an IO controller thinks we need more throttling.
1884 */
1885void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1886{
54c52e10
TH
1887 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
1888 return;
d09d8df3
JB
1889 blkcg_scale_delay(blkg, now);
1890 atomic64_add(delta, &blkg->delay_nsec);
1891}
d09d8df3 1892
28fc591f
CH
1893/**
1894 * blkg_tryget_closest - try to get a blkg ref on the closest blkg
13c7863d
CH
1895 * @bio: target bio
1896 * @css: target css
28fc591f 1897 *
13c7863d
CH
1898 * As the failure mode here is to walk up the blkg tree, this ensures that the
1899 * blkg->parent pointers are always valid. This returns the blkg that it ended
1900 * up taking a reference on, or %NULL if no reference was taken.
28fc591f 1901 */
13c7863d
CH
1902static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
1903 struct cgroup_subsys_state *css)
28fc591f 1904{
13c7863d 1905 struct blkcg_gq *blkg, *ret_blkg = NULL;
28fc591f 1906
13c7863d 1907 rcu_read_lock();
99e60387 1908 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
28fc591f
CH
1909 while (blkg) {
1910 if (blkg_tryget(blkg)) {
1911 ret_blkg = blkg;
1912 break;
1913 }
1914 blkg = blkg->parent;
1915 }
13c7863d 1916 rcu_read_unlock();
28fc591f
CH
1917
1918 return ret_blkg;
1919}
1920
1921/**
1922 * bio_associate_blkg_from_css - associate a bio with a specified css
1923 * @bio: target bio
1924 * @css: target css
1925 *
1926 * Associate @bio with the blkg found by combining the css's blkg and the
1927 * request_queue of the @bio. An association failure is handled by walking up
1928 * the blkg tree. Therefore, the blkg associated can be anything between the
1929 * blkg looked up for @css and q->root_blkg. This situation only happens when
1930 * a cgroup is dying; the remaining bios then spill to the closest alive blkg.
1931 *
1932 * A reference will be taken on the blkg and will be released when @bio is
1933 * freed.
1934 */
1935void bio_associate_blkg_from_css(struct bio *bio,
1936 struct cgroup_subsys_state *css)
1937{
28fc591f
CH
1938 if (bio->bi_blkg)
1939 blkg_put(bio->bi_blkg);
1940
a5b97526 1941 if (css && css->parent) {
13c7863d 1942 bio->bi_blkg = blkg_tryget_closest(bio, css);
a5b97526 1943 } else {
ed6cddef
PB
1944 blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
1945 bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
a5b97526 1946 }
28fc591f
CH
1947}
1948EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
1949
1950/**
1951 * bio_associate_blkg - associate a bio with a blkg
1952 * @bio: target bio
1953 *
1954 * Associate @bio with the blkg found from the bio's css and request_queue.
1955 * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
1956 * already associated, the css is reused and association redone as the
1957 * request_queue may have changed.
1958 */
1959void bio_associate_blkg(struct bio *bio)
1960{
1961 struct cgroup_subsys_state *css;
1962
1963 rcu_read_lock();
1964
1965 if (bio->bi_blkg)
bbb1ebe7 1966 css = bio_blkcg_css(bio);
28fc591f
CH
1967 else
1968 css = blkcg_css();
1969
1970 bio_associate_blkg_from_css(bio, css);
1971
1972 rcu_read_unlock();
1973}
1974EXPORT_SYMBOL_GPL(bio_associate_blkg);
1975
1976/**
1977 * bio_clone_blkg_association - clone blkg association from src to dst bio
1978 * @dst: destination bio
1979 * @src: source bio
1980 */
1981void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1982{
22b106e5
JK
1983 if (src->bi_blkg)
1984 bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
28fc591f
CH
1985}
1986EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
1987
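/* Map a bio's operation to the BLKG_IOSTAT_* bucket it is accounted under. */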
db18a53e
CH
1988static int blk_cgroup_io_type(struct bio *bio)
1989{
1990 if (op_is_discard(bio->bi_opf))
1991 return BLKG_IOSTAT_DISCARD;
1992 if (op_is_write(bio->bi_opf))
1993 return BLKG_IOSTAT_WRITE;
1994 return BLKG_IOSTAT_READ;
1995}
1996
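/*
 * Charge @bio to the owning blkg's percpu iostat counters and, on the
 * cgroup2 hierarchy, mark the stats as pending for the next rstat flush.
 */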
1997void blk_cgroup_bio_start(struct bio *bio)
1998{
3b8cc629 1999 struct blkcg *blkcg = bio->bi_blkg->blkcg;
db18a53e
CH
2000 int rwd = blk_cgroup_io_type(bio), cpu;
2001 struct blkg_iostat_set *bis;
3c08b093 2002 unsigned long flags;
db18a53e
CH
2003
2004 cpu = get_cpu();
2005 bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
3c08b093 2006 flags = u64_stats_update_begin_irqsave(&bis->sync);
db18a53e
CH
2007
2008 /*
2009 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
2010 * bio and we would have already accounted for the size of the bio.
2011 */
2012 if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
2013 bio_set_flag(bio, BIO_CGROUP_ACCT);
0b8cc25d 2014 bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
db18a53e
CH
2015 }
2016 bis->cur.ios[rwd]++;
2017
3b8cc629
WL
2018 /*
2019 * If the iostat_cpu isn't in a lockless list, put it into the
2020 * list to indicate that a stat update is pending.
2021 */
2022 if (!READ_ONCE(bis->lqueued)) {
2023 struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);
2024
2025 llist_add(&bis->lnode, lhead);
2026 WRITE_ONCE(bis->lqueued, true);
2027 percpu_ref_get(&bis->blkg->refcnt);
2028 }
2029
3c08b093 2030 u64_stats_update_end_irqrestore(&bis->sync, flags);
db18a53e 2031 if (cgroup_subsys_on_dfl(io_cgrp_subsys))
3b8cc629 2032 cgroup_rstat_updated(blkcg->css.cgroup, cpu);
db18a53e
CH
2033 put_cpu();
2034}
2035
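/*
 * Return true if any cgroup in the current task's blkcg hierarchy has a
 * non-zero congestion_count.
 */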
216889aa
CH
2036bool blk_cgroup_congested(void)
2037{
2038 struct cgroup_subsys_state *css;
2039 bool ret = false;
2040
2041 rcu_read_lock();
d200ca14 2042 for (css = blkcg_css(); css; css = css->parent) {
216889aa
CH
2043 if (atomic_read(&css->cgroup->congestion_count)) {
2044 ret = true;
2045 break;
2046 }
216889aa
CH
2047 }
2048 rcu_read_unlock();
2049 return ret;
2050}
2051
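/* Set up the workqueue that services punted (REQ_CGROUP_PUNT) bios. */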
d3f77dfd
TH
2052static int __init blkcg_init(void)
2053{
2054 blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
2055 WQ_MEM_RECLAIM | WQ_FREEZABLE |
2056 WQ_UNBOUND | WQ_SYSFS, 0);
2057 if (!blkcg_punt_bio_wq)
2058 return -ENOMEM;
2059 return 0;
2060}
2061subsys_initcall(blkcg_init);
2062
903d23f0
JB
2063module_param(blkcg_debug_stats, bool, 0644);
2064MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");