// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals. Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

#define BLKG_DESTROY_BATCH_SIZE  64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * A reference to the blkg is taken when an iostat_cpu is queued and put
 * back when it is flushed, protecting against blkg removal in between.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
	int cpu;

	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
	if (!blkcg->lhead)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
	return 0;
}
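
/*
 * Illustrative sketch (not part of this file): the update side in
 * blk_cgroup_bio_start() queues a percpu iostat_cpu roughly like this,
 * assuming the iostat_cpu field names used by blkcg_rstat_flush() below:
 *
 *	if (!READ_ONCE(bis->lqueued)) {
 *		llist_add(&bis->lnode, this_cpu_ptr(blkcg->lhead));
 *		WRITE_ONCE(bis->lqueued, true);
 *		percpu_ref_get(&bis->blkg->refcnt);
 *	}
 *
 * The matching dequeue and percpu_ref_put() happen in blkcg_rstat_flush().
 */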

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
	 * To make sure pd_free_fn() is called in order, the deletion of the
	 * blkg->q_node list entry is delayed to here from blkg_destroy(),
	 * and blkcg_mutex is used to synchronize blkg_free_workfn() with
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
	list_del_init(&blkg->q_node);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and the request queue's release handler may
	 * sleep, so free @blkg by scheduling a work function.
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but holding an RCU lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Holding a reference to a blkg under RCU only permits access to values
 * local to the group, like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}
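
/*
 * Putting the pieces above together, the release pipeline is: blkg_destroy()
 * kills the percpu ref; the final blkg_put() then invokes blkg_release(),
 * which defers to __blkg_release() via RCU, which in turn has blkg_free()
 * schedule blkg_free_workfn(). This honors both RCU readers and the
 * sleeping ->pd_free_fn() / request queue release handler.
 */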

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg->parent) {
		spin_lock(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock(&blkg->async_bio_lock);
		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
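
/*
 * Example (sketch, context hypothetical): a shared kthread writing out data
 * on behalf of a cgroup would associate the bio and then punt it rather than
 * calling submit_bio() directly:
 *
 *	bio_associate_blkg_from_css(bio, blkcg_css);
 *	blkcg_punt_bio_submit(bio);
 */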

static int __init blkcg_punt_bio_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);
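
/*
 * Example (sketch): a caller that needs the css beyond the life of the bio
 * would pin it first, since association only guarantees validity while the
 * bio holds its blkg reference:
 *
 *	struct cgroup_subsys_state *css = bio_blkcg_css(bio);
 *
 *	if (css && css_tryget_online(css)) {
 *		... use css ...
 *		css_put(css);
 *	}
 */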

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg. This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents. Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * The blkg stays on the queue list until blkg_free_workfn() (see the
	 * details there), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all()
	 * before blkg_free_workfn() runs.
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * In order to avoid holding the spin lock for too long,
		 * release it when a batch of blkgs has been destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
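
/*
 * Example (sketch, "my_*" names hypothetical): a policy typically pairs
 * blkcg_print_blkgs() with a prfill callback built on __blkg_prfill_u64():
 *
 *	static u64 my_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
 *			     int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, my_pd_to_limit(pd));
 *	}
 *
 *	static int my_show(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), my_prfill,
 *				  &my_policy, 0, false);
 *		return 0;
 *	}
 */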

/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return -ENODEV;
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/*
	 * blkcg_deactivate_policy() requires the queue to be frozen, so
	 * grabbing q_usage_counter here is enough to prevent it from running
	 * concurrently with us.
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		spin_lock_irq(&q->queue_lock);

		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit_queue:
	blk_queue_exit(q);
fail:
	/*
	 * If the queue was busy (-EBUSY), we should retry. Do so after a
	 * short msleep(). It isn't strictly necessary, but the queue can
	 * stay in that state for some time and it's always nice to avoid
	 * busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update. This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
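
/*
 * Example (sketch, "my_*" names hypothetical, error handling abbreviated):
 * a policy's cftype write handler drives the three helpers above like this:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &my_policy, &ctx);
 *	if (!ret)
 *		ret = my_apply_config(ctx.blkg, ctx.body);
 *	blkg_conf_exit(&ctx);
 *	return ret ?: nbytes;
 */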

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
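
/*
 * Worked example: if a percpu counter reads cur = 10 while last = 7, the
 * delta of 3 is added to the blkg's global iostat.cur and last advances to
 * 10, so the next flush propagates only what accumulated since this one.
 */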

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(css->cgroup))
		return;

	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		WRITE_ONCE(bisc->lqueued, false);

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent)
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
		percpu_ref_put(&blkg->refcnt);
	}

out:
	rcu_read_unlock();
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			/* convert sectors to bytes */
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;
		}

		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&blkg->iostat.cur, &tmp);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
	}
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			   rbytes, wbytes, rios, wios,
			   dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			   atomic_read(&blkg->use_delay),
			   atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
	return &css_to_blkcg(css)->cgwb_list;
}
#endif

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
 *    which offlines writeback. Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg. This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies. The next stage is triggered when
 *    the online_pin count goes to zero.
 *
 * 2. When the online_pin count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs. Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue. Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check whether we
			 * need to reschedule to avoid a softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}

/**
 * blkcg_pin_online - pin online state
 * @blkcg_css: blkcg of interest
 *
 * While pinned, a blkcg is kept online. This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
	refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg_css: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
	struct blkcg *blkcg = css_to_blkcg(blkcg_css);

	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}
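
/*
 * Example (sketch): the writeback code pairs the two helpers above around a
 * cgwb's lifetime so that blkg destruction waits for outstanding writeback:
 *
 *	blkcg_pin_online(blkcg_css);	when the cgwb is created
 *	...
 *	blkcg_unpin_online(blkcg_css);	when the cgwb is released
 */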

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away. Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(css);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	free_percpu(blkcg->lhead);
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been registered yet, skip it here;
		 * its cpd will be allocated in blkcg_policy_register().
		 * Otherwise, check if the policy requires any specific
		 * per-cgroup data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them. Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(&parent->css);
	return 0;
}

int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	INIT_LIST_HEAD(&q->blkg_list);
	mutex_init(&q->blkcg_mutex);

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_ioprio_init(disk);
	if (ret)
		goto err_destroy_all;

	ret = blk_throtl_init(disk);
	if (ret)
		goto err_ioprio_exit;

	return 0;

err_ioprio_exit:
	blk_ioprio_exit(disk);
err_destroy_all:
	blkg_destroy_all(disk);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_disk)
		put_disk(tsk->throttle_disk);
	tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk. Requires %GFP_KERNEL context. @disk's queue is
 * frozen to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with the queue frozen, so nobody would be accessing blkgs
 * from the IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT | __GFP_NOWARN);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed. Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pd->online = false;
	}

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		if (pol->pd_online_fn)
			pol->pd_online_fn(blkg->pd[pol->plid]);
		blkg->pd[pol->plid]->online = true;
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
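
/*
 * Example (sketch, policy name hypothetical): a policy's per-disk setup
 * enables itself with a single call, typically from its init path:
 *
 *	ret = blkcg_activate_policy(disk, &my_policy);
 *	if (ret)
 *		return ret;
 *
 * and undoes it with blkcg_deactivate_policy(disk, &my_policy) on teardown.
 */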

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		if (blkcg->cpd[pol->plid]) {
			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
			blkcg->cpd[pol->plid] = NULL;
		}
	}
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

| 1641 | /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
| 1642 | if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || |
| 1643 | (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) |
| 1644 | goto err_unlock; |
| 1645 | |
| 1646 | /* register @pol */ |
| 1647 | pol->plid = i; |
| 1648 | blkcg_policy[pol->plid] = pol; |
| 1649 | |
| 1650 | /* allocate and install cpd's */ |
| 1651 | if (pol->cpd_alloc_fn) { |
| 1652 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1653 | struct blkcg_policy_data *cpd; |
| 1654 | |
| 1655 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
| 1656 | if (!cpd) |
| 1657 | goto err_free_cpds; |
| 1658 | |
| 1659 | blkcg->cpd[pol->plid] = cpd; |
| 1660 | cpd->blkcg = blkcg; |
| 1661 | cpd->plid = pol->plid; |
| 1662 | } |
| 1663 | } |
| 1664 | |
| 1665 | mutex_unlock(&blkcg_pol_mutex); |
| 1666 | |
| 1667 | /* everything is in place, add intf files for the new policy */ |
| 1668 | if (pol->dfl_cftypes) |
| 1669 | WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys, |
| 1670 | pol->dfl_cftypes)); |
| 1671 | if (pol->legacy_cftypes) |
| 1672 | WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys, |
| 1673 | pol->legacy_cftypes)); |
| 1674 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1675 | return 0; |
| 1676 | |
| 1677 | err_free_cpds: |
| 1678 | if (pol->cpd_free_fn) |
| 1679 | blkcg_free_all_cpd(pol); |
| 1680 | |
| 1681 | blkcg_policy[pol->plid] = NULL; |
| 1682 | err_unlock: |
| 1683 | mutex_unlock(&blkcg_pol_mutex); |
| 1684 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1685 | return ret; |
| 1686 | } |
| 1687 | EXPORT_SYMBOL_GPL(blkcg_policy_register); |
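|  | 
|  | /*
|  |  * Illustrative sketch (hypothetical "myqos" policy, not in-tree): the
|  |  * alloc/free callbacks must come in pairs, and registration normally
|  |  * happens from module init:
|  |  *
|  |  *	static struct blkcg_policy blkcg_policy_myqos = {
|  |  *		.pd_alloc_fn	= myqos_pd_alloc,
|  |  *		.pd_free_fn	= myqos_pd_free,
|  |  *		.pd_init_fn	= myqos_pd_init,
|  |  *	};
|  |  *
|  |  *	static int __init myqos_init(void)
|  |  *	{
|  |  *		return blkcg_policy_register(&blkcg_policy_myqos);
|  |  *	}
|  |  */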
| 1688 | |
| 1689 | /** |
| 1690 | * blkcg_policy_unregister - unregister a blkcg policy |
| 1691 | * @pol: blkcg policy to unregister |
| 1692 | * |
| 1693 | * Undo blkcg_policy_register(@pol). Might sleep. |
| 1694 | */ |
| 1695 | void blkcg_policy_unregister(struct blkcg_policy *pol) |
| 1696 | { |
| 1697 | mutex_lock(&blkcg_pol_register_mutex); |
| 1698 | |
| 1699 | if (WARN_ON(blkcg_policy[pol->plid] != pol)) |
| 1700 | goto out_unlock; |
| 1701 | |
| 1702 | /* kill the intf files first */ |
| 1703 | if (pol->dfl_cftypes) |
| 1704 | cgroup_rm_cftypes(pol->dfl_cftypes); |
| 1705 | if (pol->legacy_cftypes) |
| 1706 | cgroup_rm_cftypes(pol->legacy_cftypes); |
| 1707 | |
| 1708 | /* remove cpds and unregister */ |
| 1709 | mutex_lock(&blkcg_pol_mutex); |
| 1710 | |
| 1711 | if (pol->cpd_free_fn) |
| 1712 | blkcg_free_all_cpd(pol); |
| 1713 | |
| 1714 | blkcg_policy[pol->plid] = NULL; |
| 1715 | |
| 1716 | mutex_unlock(&blkcg_pol_mutex); |
| 1717 | out_unlock: |
| 1718 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1719 | } |
| 1720 | EXPORT_SYMBOL_GPL(blkcg_policy_unregister); |
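|  | 
|  | /*
|  |  * And the matching module exit for the hypothetical policy sketched
|  |  * above:
|  |  *
|  |  *	static void __exit myqos_exit(void)
|  |  *	{
|  |  *		blkcg_policy_unregister(&blkcg_policy_myqos);
|  |  *	}
|  |  */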
| 1721 | |
| 1722 | /* |
| 1723 | * Scale the accumulated delay based on how long it has been since we last
| 1724 | * updated the delay. This is called when adding delay, in case it has been a
| 1725 | * while since delay was last added, and when checking whether a task needs to
| 1726 | * be throttled, to account for any delay that has accrued in the meantime.
| 1727 | */ |
| 1728 | static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) |
| 1729 | { |
| 1730 | u64 old = atomic64_read(&blkg->delay_start); |
| 1731 | |
| 1732 | /* negative use_delay means no scaling, see blkcg_set_delay() */ |
| 1733 | if (atomic_read(&blkg->use_delay) < 0) |
| 1734 | return; |
| 1735 | |
| 1736 | /* |
| 1737 | * We only want to scale down once per second. The idea here is that we
| 1738 | * want to delay tasks for min(delay_nsec, NSEC_PER_SEC) within a given
| 1739 | * time window, and only for delay that has accrued recently, since one
| 1740 | * second is the longest a task can be throttled at a time. We save the
| 1741 | * current delay window in blkg->last_delay so we know what amount is
| 1742 | * still left to be charged to the blkg from this point onward.
| 1743 | * blkg->last_use keeps track of the use_delay counter. The idea is that
| 1744 | * if we're unthrottling the blkg, we're ok with whatever is happening
| 1745 | * now, and we can take away more of the accumulated delay as we've
| 1746 | * already throttled enough that everybody is happy with their IO
| 1747 | * latencies.
| 1748 | */ |
| 1749 | if (time_before64(old + NSEC_PER_SEC, now) && |
| 1750 | atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) { |
| 1751 | u64 cur = atomic64_read(&blkg->delay_nsec); |
| 1752 | u64 sub = min_t(u64, blkg->last_delay, now - old); |
| 1753 | int cur_use = atomic_read(&blkg->use_delay); |
| 1754 | |
| 1755 | /* |
| 1756 | * We've been unthrottled, subtract a larger chunk of our |
| 1757 | * accumulated delay. |
| 1758 | */ |
| 1759 | if (cur_use < blkg->last_use) |
| 1760 | sub = max_t(u64, sub, blkg->last_delay >> 1); |
| 1761 | |
| 1762 | /* |
| 1763 | * This shouldn't happen, but handle it anyway. Our delay_nsec |
| 1764 | * should only ever be growing except here where we subtract out |
| 1765 | * min(last_delay, 1 second), but lord knows bugs happen and I'd |
| 1766 | * rather not end up with negative numbers. |
| 1767 | */ |
| 1768 | if (unlikely(cur < sub)) { |
| 1769 | atomic64_set(&blkg->delay_nsec, 0); |
| 1770 | blkg->last_delay = 0; |
| 1771 | } else { |
| 1772 | atomic64_sub(sub, &blkg->delay_nsec); |
| 1773 | blkg->last_delay = cur - sub; |
| 1774 | } |
| 1775 | blkg->last_use = cur_use; |
| 1776 | } |
| 1777 | } |
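|  | 
|  | /*
|  |  * Worked example (illustrative numbers): suppose delay_nsec holds 4s
|  |  * of accumulated delay, last_delay is 3s, and 1s has elapsed since
|  |  * delay_start. Then sub = min(3s, 1s) = 1s, so delay_nsec drops to 3s
|  |  * and last_delay becomes 3s. If use_delay dropped since the last
|  |  * scaling (we were unthrottled), sub is raised to max(1s, 3s / 2) =
|  |  * 1.5s instead, leaving 2.5s in both delay_nsec and last_delay.
|  |  */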
| 1778 | |
| 1779 | /* |
| 1780 | * This is called when we want to actually walk up the hierarchy and check to |
| 1781 | * see if we need to throttle, and then actually throttle if there is some |
| 1782 | * accumulated delay. This should only be called upon return to user space so |
| 1783 | * we're not holding some lock that would induce a priority inversion. |
| 1784 | */ |
| 1785 | static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) |
| 1786 | { |
| 1787 | unsigned long pflags; |
| 1788 | bool clamp; |
| 1789 | u64 now = ktime_to_ns(ktime_get()); |
| 1790 | u64 exp; |
| 1791 | u64 delay_nsec = 0; |
| 1792 | int tok; |
| 1793 | |
| 1794 | while (blkg->parent) { |
| 1795 | int use_delay = atomic_read(&blkg->use_delay); |
| 1796 | |
| 1797 | if (use_delay) { |
| 1798 | u64 this_delay; |
| 1799 | |
| 1800 | blkcg_scale_delay(blkg, now); |
| 1801 | this_delay = atomic64_read(&blkg->delay_nsec); |
| 1802 | if (this_delay > delay_nsec) { |
| 1803 | delay_nsec = this_delay; |
| 1804 | clamp = use_delay > 0; |
| 1805 | } |
| 1806 | } |
| 1807 | blkg = blkg->parent; |
| 1808 | } |
| 1809 | |
| 1810 | if (!delay_nsec) |
| 1811 | return; |
| 1812 | |
| 1813 | /* |
| 1814 | * Let's not sleep for all eternity if we've amassed a huge delay. |
| 1815 | * Swapping or metadata IO can accumulate tens of seconds worth of
| 1816 | * delay, and we want userspace to be able to do _something_, so cap
| 1817 | * the delays at 0.25s. If there are tens of seconds worth of delay,
| 1818 | * tasks will then be delayed by 0.25s on every syscall. If
| 1819 | * blkcg_set_delay() was used, as indicated by a negative use_delay,
| 1820 | * the caller is responsible for regulating the range.
| 1821 | */ |
| 1822 | if (clamp) |
| 1823 | delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC); |
| 1824 | |
| 1825 | if (use_memdelay) |
| 1826 | psi_memstall_enter(&pflags); |
| 1827 | |
| 1828 | exp = ktime_add_ns(now, delay_nsec); |
| 1829 | tok = io_schedule_prepare(); |
| 1830 | do { |
| 1831 | __set_current_state(TASK_KILLABLE); |
| 1832 | if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS)) |
| 1833 | break; |
| 1834 | } while (!fatal_signal_pending(current)); |
| 1835 | io_schedule_finish(tok); |
| 1836 | |
| 1837 | if (use_memdelay) |
| 1838 | psi_memstall_leave(&pflags); |
| 1839 | } |
| 1840 | |
| 1841 | /** |
| 1842 | * blkcg_maybe_throttle_current - throttle the current task if it has been marked |
| 1843 | * |
| 1844 | * This is only called if we've been marked with set_notify_resume(). Obviously
| 1845 | * set_notify_resume() can be triggered for reasons other than blkcg throttling,
| 1846 | * so we check whether current->throttle_disk is set; if it isn't, this does
| 1847 | * nothing. This should only ever be called by the resume code; it's not meant
| 1848 | * to be called willy-nilly, as it will actually do the work to throttle the
| 1849 | * task if it is set up for throttling.
| 1850 | */ |
| 1851 | void blkcg_maybe_throttle_current(void) |
| 1852 | { |
| 1853 | struct gendisk *disk = current->throttle_disk; |
| 1854 | struct blkcg *blkcg; |
| 1855 | struct blkcg_gq *blkg; |
| 1856 | bool use_memdelay = current->use_memdelay; |
| 1857 | |
| 1858 | if (!disk) |
| 1859 | return; |
| 1860 | |
| 1861 | current->throttle_disk = NULL; |
| 1862 | current->use_memdelay = false; |
| 1863 | |
| 1864 | rcu_read_lock(); |
| 1865 | blkcg = css_to_blkcg(blkcg_css()); |
| 1866 | if (!blkcg) |
| 1867 | goto out; |
| 1868 | blkg = blkg_lookup(blkcg, disk->queue); |
| 1869 | if (!blkg) |
| 1870 | goto out; |
| 1871 | if (!blkg_tryget(blkg)) |
| 1872 | goto out; |
| 1873 | rcu_read_unlock(); |
| 1874 | |
| 1875 | blkcg_maybe_throttle_blkg(blkg, use_memdelay); |
| 1876 | blkg_put(blkg); |
| 1877 | put_disk(disk); |
| 1878 | return; |
| 1879 | out: |
| 1880 | rcu_read_unlock(); |
| 1881 | } |
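|  | 
|  | /*
|  |  * Note: this is wired up via resume_user_mode_work() in
|  |  * <linux/resume_user_mode.h>, which calls blkcg_maybe_throttle_current()
|  |  * for any task flagged with set_notify_resume() on its way back to
|  |  * user space.
|  |  */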
| 1882 | |
| 1883 | /** |
| 1884 | * blkcg_schedule_throttle - this task needs to check for throttling |
| 1885 | * @disk: disk to throttle |
| 1886 | * @use_memdelay: do we charge this to memory delay for PSI |
| 1887 | * |
| 1888 | * This is called by the IO controller when we know there's delay accumulated
| 1889 | * for the blkg for this task. We do not pass the blkg because there are places
| 1890 | * we call this that may not have that information; the swapping code, for
| 1891 | * instance, will only have a block_device at that point. This sets the
| 1892 | * notify_resume flag for the task to check whether it requires throttling
| 1893 | * before returning to user space.
| 1894 | *
| 1895 | * We will only schedule once per syscall. You can call this over and over
| 1896 | * again and it will only do the check once upon return to user space, and only
| 1897 | * throttle once. If the task needs to be throttled again, it'll need to be
| 1898 | * marked again the next time we see the task.
| 1899 | */ |
| 1900 | void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay) |
| 1901 | { |
| 1902 | if (unlikely(current->flags & PF_KTHREAD)) |
| 1903 | return; |
| 1904 | |
| 1905 | if (current->throttle_disk != disk) { |
| 1906 | if (test_bit(GD_DEAD, &disk->state)) |
| 1907 | return; |
| 1908 | get_device(disk_to_dev(disk)); |
| 1909 | |
| 1910 | if (current->throttle_disk) |
| 1911 | put_disk(current->throttle_disk); |
| 1912 | current->throttle_disk = disk; |
| 1913 | } |
| 1914 | |
| 1915 | if (use_memdelay) |
| 1916 | current->use_memdelay = use_memdelay; |
| 1917 | set_notify_resume(current); |
| 1918 | } |
| 1919 | |
| 1920 | /** |
| 1921 | * blkcg_add_delay - add delay to this blkg |
| 1922 | * @blkg: blkg of interest |
| 1923 | * @now: the current time in nanoseconds |
| 1924 | * @delta: how many nanoseconds of delay to add |
| 1925 | * |
| 1926 | * Charge @delta to the blkg's current delay accumulation. This is used to |
| 1927 | * throttle tasks if an IO controller thinks we need more throttling. |
| 1928 | */ |
| 1929 | void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) |
| 1930 | { |
| 1931 | if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) |
| 1932 | return; |
| 1933 | blkcg_scale_delay(blkg, now); |
| 1934 | atomic64_add(delta, &blkg->delay_nsec); |
| 1935 | } |
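|  | 
|  | /*
|  |  * Illustrative flow (a sketch, not lifted from an in-tree controller):
|  |  * a policy that decided the blkg owes "overage_ns" of throttling for
|  |  * this window, and that has previously bumped the use_delay counter
|  |  * (e.g. via blkcg_use_delay()), could charge it and arm the
|  |  * return-to-user throttle in one go:
|  |  *
|  |  *	u64 now = ktime_to_ns(ktime_get());
|  |  *
|  |  *	blkcg_add_delay(blkg, now, overage_ns);
|  |  *	blkcg_schedule_throttle(disk, true);
|  |  *
|  |  * The actual sleep then happens in blkcg_maybe_throttle_blkg() on the
|  |  * task's way back to user space.
|  |  */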
| 1936 | |
| 1937 | /** |
| 1938 | * blkg_tryget_closest - try to get a blkg ref on the closest blkg
| 1939 | * @bio: target bio |
| 1940 | * @css: target css |
| 1941 | * |
| 1942 | * As the failure mode here is to walk up the blkg tree, this ensures that the
| 1943 | * blkg->parent pointers are always valid. This returns the blkg that it ended |
| 1944 | * up taking a reference on or %NULL if no reference was taken. |
| 1945 | */ |
| 1946 | static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio, |
| 1947 | struct cgroup_subsys_state *css) |
| 1948 | { |
| 1949 | struct blkcg_gq *blkg, *ret_blkg = NULL; |
| 1950 | |
| 1951 | rcu_read_lock(); |
| 1952 | blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk); |
| 1953 | while (blkg) { |
| 1954 | if (blkg_tryget(blkg)) { |
| 1955 | ret_blkg = blkg; |
| 1956 | break; |
| 1957 | } |
| 1958 | blkg = blkg->parent; |
| 1959 | } |
| 1960 | rcu_read_unlock(); |
| 1961 | |
| 1962 | return ret_blkg; |
| 1963 | } |
| 1964 | |
| 1965 | /** |
| 1966 | * bio_associate_blkg_from_css - associate a bio with a specified css |
| 1967 | * @bio: target bio |
| 1968 | * @css: target css |
| 1969 | * |
| 1970 | * Associate @bio with the blkg found by combining the css's blkg and the |
| 1971 | * request_queue of the @bio. An association failure is handled by walking up |
| 1972 | * the blkg tree. Therefore, the associated blkg can be anything between the
| 1973 | * css's blkg and q->root_blkg. This situation only happens when a cgroup is
| 1974 | * dying; the remaining bios then spill to the closest alive blkg.
| 1975 | * |
| 1976 | * A reference will be taken on the blkg and will be released when @bio is |
| 1977 | * freed. |
| 1978 | */ |
| 1979 | void bio_associate_blkg_from_css(struct bio *bio, |
| 1980 | struct cgroup_subsys_state *css) |
| 1981 | { |
| 1982 | if (bio->bi_blkg) |
| 1983 | blkg_put(bio->bi_blkg); |
| 1984 | |
| 1985 | if (css && css->parent) { |
| 1986 | bio->bi_blkg = blkg_tryget_closest(bio, css); |
| 1987 | } else { |
| 1988 | blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg); |
| 1989 | bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg; |
| 1990 | } |
| 1991 | } |
| 1992 | EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); |
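|  | 
|  | /*
|  |  * Usage sketch (hypothetical submitter): writeback-style code that
|  |  * already knows which cgroup to charge can associate the bio
|  |  * explicitly, provided css is a blkcg css the caller holds a
|  |  * reference on for the duration of the call:
|  |  *
|  |  *	bio_set_dev(bio, bdev);
|  |  *	bio_associate_blkg_from_css(bio, css);
|  |  *	submit_bio(bio);
|  |  */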
| 1993 | |
| 1994 | /** |
| 1995 | * bio_associate_blkg - associate a bio with a blkg |
| 1996 | * @bio: target bio |
| 1997 | * |
| 1998 | * Associate @bio with the blkg found from the bio's css and request_queue. |
| 1999 | * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
| 2000 | * already associated, the css is reused and association redone as the |
| 2001 | * request_queue may have changed. |
| 2002 | */ |
| 2003 | void bio_associate_blkg(struct bio *bio) |
| 2004 | { |
| 2005 | struct cgroup_subsys_state *css; |
| 2006 | |
| 2007 | rcu_read_lock(); |
| 2008 | |
| 2009 | if (bio->bi_blkg) |
| 2010 | css = bio_blkcg_css(bio); |
| 2011 | else |
| 2012 | css = blkcg_css(); |
| 2013 | |
| 2014 | bio_associate_blkg_from_css(bio, css); |
| 2015 | |
| 2016 | rcu_read_unlock(); |
| 2017 | } |
| 2018 | EXPORT_SYMBOL_GPL(bio_associate_blkg); |
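|  | 
|  | /*
|  |  * Note that this reuses whatever css the bio already carries: a bio
|  |  * that was retargeted to a different device (and hence a different
|  |  * request_queue) can simply be re-run through bio_associate_blkg() to
|  |  * pick up the blkg for the new queue under the same css.
|  |  */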
| 2019 | |
| 2020 | /** |
| 2021 | * bio_clone_blkg_association - clone blkg association from src to dst bio |
| 2022 | * @dst: destination bio |
| 2023 | * @src: source bio |
| 2024 | */ |
| 2025 | void bio_clone_blkg_association(struct bio *dst, struct bio *src) |
| 2026 | { |
| 2027 | if (src->bi_blkg) |
| 2028 | bio_associate_blkg_from_css(dst, bio_blkcg_css(src)); |
| 2029 | } |
| 2030 | EXPORT_SYMBOL_GPL(bio_clone_blkg_association); |
| 2031 | |
| 2032 | static int blk_cgroup_io_type(struct bio *bio) |
| 2033 | { |
| 2034 | if (op_is_discard(bio->bi_opf)) |
| 2035 | return BLKG_IOSTAT_DISCARD; |
| 2036 | if (op_is_write(bio->bi_opf)) |
| 2037 | return BLKG_IOSTAT_WRITE; |
| 2038 | return BLKG_IOSTAT_READ; |
| 2039 | } |
| 2040 | |
| 2041 | void blk_cgroup_bio_start(struct bio *bio) |
| 2042 | { |
| 2043 | struct blkcg *blkcg = bio->bi_blkg->blkcg; |
| 2044 | int rwd = blk_cgroup_io_type(bio), cpu; |
| 2045 | struct blkg_iostat_set *bis; |
| 2046 | unsigned long flags; |
| 2047 | |
| 2048 | /* Root-level stats are sourced from system-wide IO stats */ |
| 2049 | if (!cgroup_parent(blkcg->css.cgroup)) |
| 2050 | return; |
| 2051 | |
| 2052 | cpu = get_cpu(); |
| 2053 | bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); |
| 2054 | flags = u64_stats_update_begin_irqsave(&bis->sync); |
| 2055 | |
| 2056 | /* |
| 2057 | * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split |
| 2058 | * bio and we would have already accounted for the size of the bio. |
| 2059 | */ |
| 2060 | if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { |
| 2061 | bio_set_flag(bio, BIO_CGROUP_ACCT); |
| 2062 | bis->cur.bytes[rwd] += bio->bi_iter.bi_size; |
| 2063 | } |
| 2064 | bis->cur.ios[rwd]++; |
| 2065 | |
| 2066 | /* |
| 2067 | * If the iostat_cpu isn't in a lockless list, put it into the |
| 2068 | * list to indicate that a stat update is pending. |
| 2069 | */ |
| 2070 | if (!READ_ONCE(bis->lqueued)) { |
| 2071 | struct llist_head *lhead = this_cpu_ptr(blkcg->lhead); |
| 2072 | |
| 2073 | llist_add(&bis->lnode, lhead); |
| 2074 | WRITE_ONCE(bis->lqueued, true); |
| 2075 | percpu_ref_get(&bis->blkg->refcnt); |
| 2076 | } |
| 2077 | |
| 2078 | u64_stats_update_end_irqrestore(&bis->sync, flags); |
| 2079 | if (cgroup_subsys_on_dfl(io_cgrp_subsys)) |
| 2080 | cgroup_rstat_updated(blkcg->css.cgroup, cpu); |
| 2081 | put_cpu(); |
| 2082 | } |
| 2083 | |
| 2084 | bool blk_cgroup_congested(void) |
| 2085 | { |
| 2086 | struct cgroup_subsys_state *css; |
| 2087 | bool ret = false; |
| 2088 | |
| 2089 | rcu_read_lock(); |
| 2090 | for (css = blkcg_css(); css; css = css->parent) { |
| 2091 | if (atomic_read(&css->cgroup->congestion_count)) { |
| 2092 | ret = true; |
| 2093 | break; |
| 2094 | } |
| 2095 | } |
| 2096 | rcu_read_unlock(); |
| 2097 | return ret; |
| 2098 | } |
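|  | 
|  | /*
|  |  * Usage sketch (hypothetical caller): code about to issue optional IO,
|  |  * such as readahead, could poll this and skip the IO while the current
|  |  * task's blkcg hierarchy is marked congested:
|  |  *
|  |  *	if (blk_cgroup_congested())
|  |  *		return;		// skip the optional IO for now
|  |  *	submit_bio(bio);
|  |  */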
| 2099 | |
| 2100 | module_param(blkcg_debug_stats, bool, 0644); |
| 2101 | MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not"); |