/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

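/*
 * Illustrative sketch (not part of the interface): a policy typically wraps
 * blkg_policy_data in its own per-blkg structure and converts back and
 * forth with container_of().  The "foo" names below are hypothetical.
 *
 *	struct foo_blkg_data {
 *		struct blkg_policy_data	pd;	// must be first
 *		struct blkg_rwstat	served;
 *		u64			budget;
 *	};
 *
 *	static struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
 *	}
 *
 * pd_alloc_fn() allocates struct foo_blkg_data and returns &foo->pd;
 * blkg_to_pd() followed by pd_to_foo() recovers it from a blkg.
 */
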
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

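/*
 * Illustrative sketch: per-blkcg data follows the same embedding pattern,
 * e.g. a hypothetical policy keeping a per-cgroup default:
 *
 *	struct foo_blkcg_data {
 *		struct blkcg_policy_data	cpd;	// must be first
 *		unsigned int			dfl_weight;
 *	};
 *
 * cpd_alloc_fn()/cpd_free_fn() allocate and free it; blkcg_to_cpd() looks
 * it up from a blkcg.
 */
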
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

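/*
 * Illustrative sketch: a policy describes itself with a statically defined
 * struct blkcg_policy; only the hooks it needs have to be set.  The names
 * below are hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 * plid is assigned by blkcg_policy_register() and must not be set by the
 * policy itself.
 */
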
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

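/*
 * Illustrative usage sketch (hypothetical "foo" policy): registration
 * happens once, typically from an initcall; activation is per-queue and is
 * what actually allocates pds for the queue's blkgs:
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	// in the policy's per-queue setup path
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *
 * blkcg_deactivate_policy() undoes the activation and frees the pds.
 */
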
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

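/*
 * Illustrative sketch: a policy's seq_file show method usually walks all
 * blkgs of the blkcg behind the file with blkcg_print_blkgs() and a prfill
 * callback.  Assuming the hypothetical foo_blkg_data above with a
 * struct blkg_rwstat member named "served", and seq_css() from cgroup.h:
 *
 *	static int foo_print_served(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_rwstat, &blkcg_policy_foo,
 *				  offsetof(struct foo_blkg_data, served),
 *				  true);
 *		return 0;
 *	}
 *
 * The final argument asks for a "Total" line summed over all blkgs.
 */
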
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

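/*
 * Illustrative sketch: a cftype write handler parses "MAJ:MIN <value>"
 * style input with blkg_conf_prep(), updates the pd of the named device,
 * and drops the queue lock and device reference with blkg_conf_finish().
 * Hypothetical names; of_css() comes from cgroup.h:
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		// ctx.blkg is the blkg for the named device; update its pd
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */
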
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * blkcg_get_css - find and get a reference to the css
 *
 * Find the css associated with either the kthread or the current task.
 * This takes a reference on the blkcg which will need to be managed by the
 * caller.
 */
static inline struct cgroup_subsys_state *blkcg_get_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	css = kthread_blkcg();
	if (css) {
		css_get(css);
	} else {
		/*
		 * This is a bit complicated.  It is possible task_css() is
		 * seeing an old css pointer here.  This is caused by the
		 * current thread migrating away from this cgroup and this
		 * cgroup dying.  css_tryget() will fail when trying to take a
		 * ref on a cgroup whose ref count has hit 0.
		 *
		 * Therefore, if it does fail, this means current must have
		 * been swapped away already and this is waiting for it to
		 * propagate on the polling cpu.  Hence the use of cpu_relax().
		 */
		while (true) {
			css = task_css(current, io_cgrp_id);
			if (likely(css_tryget(css)))
				break;
			cpu_relax();
		}
	}

	rcu_read_unlock();

	return css;
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and, if it returns true, run with the root blkg for that queue, doing any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under an RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

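/*
 * Illustrative sketch: the typical fast-path lookup pattern, assuming the
 * caller already holds a usable blkcg pointer:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg) {
 *		// use blkg; take a reference (see blkg_try_get() below)
 *		// if it must outlive the RCU read section
 *	}
 *	rcu_read_unlock();
 */
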
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

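/*
 * Illustrative sketch: pairing an RCU lookup with a reference that outlives
 * the read-side critical section:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg = blkg_try_get(blkg);	// NULL if the blkg is dying
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		// safe to use outside RCU now
 *		blkg_put(blkg);
 *	}
 */
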
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

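/*
 * Illustrative sketch: walking a subtree to update per-blkg policy data,
 * RCU read locked as required (foo_reset_pd() is a hypothetical helper and
 * blkg is the blkg to start from):
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, css, blkg) {
 *		struct blkg_policy_data *pd = blkg_to_pd(pos, &blkcg_policy_foo);
 *
 *		if (pd)
 *			foo_reset_pd(pd);
 *	}
 *	rcu_read_unlock();
 */
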
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source blkg_stat
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

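/*
 * Illustrative sketch: a policy accounting an incoming bio into its own
 * rwstat (the hypothetical "served" member from above) and reporting the
 * combined read+write total via blkg_rwstat_total() below:
 *
 *	blkg_rwstat_add(&foo->served, bio->bi_opf, bio->bi_iter.bi_size);
 *	...
 *	u64 total = blkg_rwstat_total(&foo->served);
 *
 * blkg_rwstat_total() sums only the READ and WRITE counters; the
 * SYNC/ASYNC/DISCARD counters are ignored.
 */
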
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source blkg_rwstat
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, NULL);
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(&q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(&q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);
	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */