/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/blk-mq.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg *blkcg;
	int plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int plid;
	/* cgroup files for the policy */
	struct cftype *dfl_cftypes;
	struct cftype *legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
	blkcg_pol_init_cpd_fn *cpd_init_fn;
	blkcg_pol_free_cpd_fn *cpd_free_fn;
	blkcg_pol_bind_cpd_fn *cpd_bind_fn;

	blkcg_pol_alloc_pd_fn *pd_alloc_fn;
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_free_pd_fn *pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn *pd_stat_fn;
};

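/*
 * Illustrative sketch, not part of this header: a minimal policy embeds
 * blkg_policy_data at the start of its private struct and implements the
 * pd_alloc/free_fn() pair.  All "example" names below are hypothetical.
 *
 *	struct example_blkg_data {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 ios;
 *	};
 *
 *	static struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
 *			struct request_queue *q, struct blkcg *blkcg)
 *	{
 *		struct example_blkg_data *ed;
 *
 *		ed = kzalloc_node(sizeof(*ed), gfp, q->node);
 *		return ed ? &ed->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct example_blkg_data, pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 * Such a policy would then be registered via blkcg_policy_register()
 * (declared below) and activated per-queue with blkcg_activate_policy().
 */
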
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device *bdev;
	struct blkcg_gq *blkg;
	char *body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

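/*
 * Illustrative sketch, assuming a hypothetical policy "example": a cgroup
 * file write handler typically parses a "MAJ:MIN ..." config line with
 * blkg_conf_prep(), updates the returned blkg, then releases the context:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is the blkg for the named device and ctx.body points at
 *	// the remainder of the input; locks taken by blkg_conf_prep() are
 *	// held until blkg_conf_finish() drops them
 *	blkg_conf_finish(&ctx);
 */
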
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

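/*
 * Illustrative sketch of the tryget logic mentioned above (caller-side,
 * not part of this header):
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (!css_tryget_online(css))
 *		css = NULL;	// css is dying, treat as absent
 *	rcu_read_unlock();
 */
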
/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio.  This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it.  However, the latter
 * potentially gets it from task_css().  This can race against task migration
 * and the cgroup dying.  It is also semantically different as it must be
 * called rcu protected and is susceptible to failure when trying to get a
 * reference to it.  Therefore, it is not ok to assume that *_get() will
 * always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

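/*
 * Illustrative sketch combining blkg_lookup() and blkg_tryget() to obtain
 * a usable reference under RCU (caller-side, not part of this header):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;	// refcnt hit zero, blkg is being freed
 *	rcu_read_unlock();
 *	// ... use blkg, then drop the reference with blkg_put() ...
 */
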
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

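/*
 * Illustrative sketch of walking a subtree with the iterators above
 * (caller-side, not part of this header):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *		// pre-order: d_blkg visits blkg itself first, then each
 *		// descendant that has a blkg on blkg->q
 *	}
 *	rcu_read_unlock();
 */
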
bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

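/*
 * Illustrative sketch of the two delay modes (caller-side, not part of this
 * header).  A throttler either pairs blkcg_use_delay() with
 * blkcg_unuse_delay() and accumulates decaying delay via blkcg_add_delay(),
 * or pins a fixed, non-decaying delay:
 *
 *	blkcg_set_delay(blkg, NSEC_PER_MSEC);	// fixed 1ms delay, no decay
 *	// ...
 *	blkcg_clear_delay(blkg);		// must be cleared explicitly
 */
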
/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key)
{ return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{ return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{ return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */