/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
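
/*
 * Illustrative sketch (not part of this header's API): a reader takes a
 * consistent snapshot of a blkg_iostat_set via the u64_stats seqcount so
 * the 64bit counters can't be torn on 32bit machines.  "bis" and "snap"
 * are hypothetical names.
 */
#if 0
	struct blkg_iostat snap;
	unsigned int seq;

	do {
		seq = u64_stats_fetch_begin(&bis->sync);
		snap = bis->cur;
	} while (u64_stats_fetch_retry(&bis->sync, seq));
#endif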

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
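
/*
 * Illustrative sketch (hypothetical policy, not defined anywhere in the
 * tree): a policy's pd_alloc_fn() returns the embedded member of a larger
 * structure and helpers recover the outer structure with container_of().
 * The same embedding convention applies to blkcg_policy_data below.
 */
#if 0
struct example_pd {
	struct blkg_policy_data	pd;	/* embedded at the beginning */
	u64			budget;	/* hypothetical private field */
};

static struct example_pd *pd_to_example(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_pd, pd) : NULL;
}
#endif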

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
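
/*
 * Illustrative registration sketch (hypothetical policy): a policy is
 * typically registered once from an init call, which assigns its plid,
 * and later activated on a specific queue so that pd_alloc_fn() runs for
 * every existing blkg.  Error handling is elided.
 */
#if 0
static struct blkcg_policy blkcg_policy_example = {
	.pd_alloc_fn	= example_pd_alloc,	/* hypothetical callbacks */
	.pd_init_fn	= example_pd_init,
	.pd_free_fn	= example_pd_free,
};

static int __init example_init(void)
{
	return blkcg_policy_register(&blkcg_policy_example);
}

/* later, inside a function, for a specific request_queue: */
	ret = blkcg_activate_policy(q, &blkcg_policy_example);
#endif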

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
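
/*
 * Illustrative sketch of a cgroup file write handler (hypothetical policy
 * and limit): blkg_conf_prep() parses the leading "MAJ:MIN" device
 * specifier, resolves (creating if necessary) the blkg, and leaves the
 * remainder of the input in ctx.body; blkg_conf_finish() drops the locks
 * and references it took.
 */
#if 0
	struct blkg_conf_ctx ctx;
	u64 limit;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
	if (ret)
		return ret;

	ret = kstrtou64(ctx.body, 0, &limit);
	/* ... apply @limit to blkg_to_pd(ctx.blkg, &blkcg_policy_example) ... */
	blkg_conf_finish(&ctx);
#endif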

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target bio
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
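
/*
 * Illustrative sketch only: a policy wanting to avoid priority inversion
 * can substitute the root blkg at issue time and account the cost back to
 * bio->bi_blkg on completion.  Real users (e.g. io.latency) carry extra
 * bookkeeping not shown here.
 */
#if 0
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (bio_issue_as_root_blkg(bio))
		blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
#endif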

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
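
/*
 * Illustrative sketch: a typical caller pins the result with
 * blkg_tryget() before leaving the RCU read section, since the blkg may
 * be in the middle of being freed.
 */
#if 0
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg && !blkg_tryget(blkg))
		blkg = NULL;
	rcu_read_unlock();
#endif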

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
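
/*
 * Illustrative sketch: walking all descendants of @q's root blkg, e.g. to
 * propagate a configuration change top-down.  Per the comment above, RCU
 * read lock is required; additionally holding the blkcg or queue lock
 * guarantees the walk sees all and only online blkgs.
 */
#if 0
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
		/* @blkg is visited before any of its children */
	}
	rcu_read_unlock();
#endif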

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
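
/*
 * Illustrative sketch of the two mutually exclusive delay disciplines
 * (see the comment on blkcg_set_delay() above).  "delta" is hypothetical.
 */
#if 0
	/* decaying style: bump use_delay, accumulate delay, drop when done */
	blkcg_use_delay(blkg);
	blkcg_add_delay(blkg, ktime_get_ns(), delta);
	/* ... */
	blkcg_unuse_delay(blkg);

	/* absolute style: fixed delay until explicitly cleared */
	blkcg_set_delay(blkg, NSEC_PER_MSEC);
	/* ... */
	blkcg_clear_delay(blkg);
#endif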

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */