/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

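/*
 * Illustrative sketch, not part of the original header: a policy would
 * embed blkg_policy_data at the start of its own per-blkg structure and
 * convert back with container_of().  "foo" and its fields are
 * hypothetical names.
 *
 *	struct foo_blkg_data {
 *		struct blkg_policy_data	pd;	(must be the first member)
 *		u64			ios_dispatched;
 *	};
 *
 *	static struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
 *	}
 */
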
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

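/*
 * Illustrative sketch, not part of the original header: minimal
 * registration of a hypothetical policy "foo" from module init/exit.
 * blkcg_policy_register() assigns ->plid; all foo_* names are assumptions.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */
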
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

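/*
 * Illustrative sketch, not part of the original header: a policy's cgroup
 * write handler typically pairs blkg_conf_prep() with blkg_conf_finish().
 * prep parses the "MAJ:MIN" prefix of @input, looks up (or creates) the
 * blkg and returns with the queue locked; finish unlocks and releases it.
 * Everything except the blkg_conf_*() calls below is a hypothetical name.
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			struct blkg_policy_data *pd =
 *				blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
 *			ret = pd ? foo_apply_limit(pd, v) : -ENODEV;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */
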
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	css = kthread_blkcg();
	if (css)
		return css_to_blkcg(css);
	return css_to_blkcg(task_css(current, io_cgrp_id));
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Returns %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

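/*
 * Illustrative sketch, not part of the original header: a typical lookup
 * from a hot path.  The returned blkg is only guaranteed to stay valid
 * within the RCU read-side critical section; see blkg_try_get() below for
 * holding it longer.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 *	rcu_read_unlock();
 */
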
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wb's have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

1adaf3dd TH |
474 | |
475 | /** | |
476 | * blkg_put - put a blkg reference | |
477 | * @blkg: blkg to put | |
1adaf3dd | 478 | */ |
3c798398 | 479 | static inline void blkg_put(struct blkcg_gq *blkg) |
1adaf3dd | 480 | { |
a5049a8a TH |
481 | WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); |
482 | if (atomic_dec_and_test(&blkg->refcnt)) | |
2a4fd070 | 483 | call_rcu(&blkg->rcu_head, __blkg_release_rcu); |
1adaf3dd TH |
484 | } |
485 | ||
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

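/*
 * Illustrative sketch, not part of the original header: summing the IO
 * counts of a whole subtree with the pre-order iterator.  Must run under
 * the RCU read lock; "blkg" is the subtree root.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *	u64 ios = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		ios += blkg_rwstat_total(&pos->stat_ios);
 *	rcu_read_unlock();
 */
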
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

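/*
 * Illustrative sketch, not part of the original header: how the legacy
 * request allocation path in blk-core.c roughly pairs these helpers
 * (simplified; error handling omitted):
 *
 *	rl = blk_get_rl(q, bio);		(queue_lock held)
 *	rq = allocate a request from rl;
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		(when the request is freed)
 */
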
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

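/*
 * Illustrative sketch, not part of the original header: the lifecycle of
 * a blkg_stat, e.g. one owned by a policy's pd:
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, 1);
 *	pr_debug("count=%llu\n",
 *		 (unsigned long long)blkg_stat_read(&st));
 *	blkg_stat_exit(&st);
 */
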
/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

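/*
 * Illustrative sketch, not part of the original header: accounting one
 * bio's size and a single IO against a blkg, mirroring what
 * blkcg_bio_issue_check() below does:
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
 *			bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 */
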
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, &blkcg->css);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

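/*
 * Illustrative note, not part of the original header: the bio submission
 * path (generic_make_request_checks() in blk-core.c) is the expected
 * caller; it drops the bio when this returns %false, e.g.:
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return false;	(bio was consumed by the throttler)
 */
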
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);
	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */