/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match. Look up from the radix tree. Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result. Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

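/*
 * Example (editorial sketch, not part of the original file): a typical
 * reader resolves the blkg for a bio's blkcg strictly under RCU,
 * assuming the bio_blkcg() helper from blk-cgroup.h:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		... use blkg->pd[pol->plid] ...
 *	rcu_read_unlock();
 *
 * The %NULL-on-bypass guarantee means callers need no extra check
 * against a dying queue here.
 */
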
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

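/*
 * Example (editorial sketch): the usual hot-path pattern is a lockless
 * lookup first, taking queue_lock only on a miss to create the blkg,
 * again assuming the bio_blkcg() helper:
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkcg(bio);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (unlikely(!blkg)) {
 *		spin_lock_irq(q->queue_lock);
 *		blkg = blkg_lookup_create(blkcg, q);
 *		if (IS_ERR(blkg))
 *			blkg = NULL;	(fall back to the root blkg)
 *		spin_unlock_irq(q->queue_lock);
 *	}
 *	rcu_read_unlock();
 */
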
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner. But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid. For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under RCU allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The iteration function used by blk_queue_for_each_rl(). It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head. The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

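/*
 * Example (editorial sketch): blk_queue_for_each_rl() in blk.h wraps
 * the helper above, roughly as
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl); \
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 *
 * so walking every request_list of @q, root_rl included, is simply:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		... operate on @rl ...
 */
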
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data. If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

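/*
 * Example (editorial sketch): a policy combines blkcg_print_blkgs()
 * with a prfill callback to implement a cftype read_seq_string
 * handler. Hypothetical policy "foo" whose per-group struct embeds
 * blkg_policy_data as its first member and carries a blkg_rwstat
 * @serviced; the offset works because @off is measured from pd:
 *
 *	static int foo_print_serviced(struct cgroup *cgrp,
 *				      struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
 *				  &blkcg_policy_foo,
 *				  offsetof(struct foo_grp, serviced), true);
 *		return 0;
 *	}
 */
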
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

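/*
 * Example (editorial sketch): a per-device config write handler pairs
 * the two helpers above. Hypothetical policy "foo" parsing
 * "MAJ:MIN VAL" input:
 *
 *	static int foo_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				 const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// apply ctx.v to ctx.blkg's foo policy data here
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */
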
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks. As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds. With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

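/*
 * Example (editorial sketch): a policy typically activates itself when
 * it attaches to a queue and deactivates on the teardown path, e.g.
 * for a hypothetical policy "foo":
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);	// on exit path
 */
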
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

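/*
 * Example (editorial sketch): a minimal registration for a
 * hypothetical policy "foo". @pd_size must cover the policy's whole
 * per-group struct, which embeds blkg_policy_data as its first member:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 limit;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *	module_init(foo_init);
 */
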
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);