/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

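/*
 * For example (illustrative), the "throttle.read_bps_device" cftype below
 * uses:
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device)
 *
 * which packs the owning policy into the high 16 bits and the attribute
 * into the low 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() undo the
 * packing when the file is accessed.
 */
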
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);

#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

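/*
 * Note: writers bump stats_cpu->syncp with u64_stats_update_begin/end
 * above; the matching read side (u64_stats_fetch_begin/retry) lives in
 * blkio_read_stat_cpu() below, which is what makes these 64-bit counters
 * safe to read on 32-bit machines.
 */
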
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
			direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 *
 * FIXME: Should be called with queue locked but currently isn't due to
 *        percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* broken, read comment in the callsite */
		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
		if (!pd->stats_cpu) {
			blkg_free(blkg);
			return NULL;
		}
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on inbetween? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);

	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);

	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

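/*
 * Callers are expected to hold both the RCU read lock and the queue lock,
 * e.g. (as in blkio_policy_parse_and_set() below):
 *
 *	rcu_read_lock();
 *	spin_lock_irq(disk->queue->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
 *	spin_unlock_irq(disk->queue->queue_lock);
 *	...
 *	rcu_read_unlock();
 */
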
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset Per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

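/*
 * So, for the device at 8:16, per-type stat lines come out as e.g.
 * "8:16 Read <val>", "8:16 Write <val>", ..., "8:16 Total <val>",
 * while diskname_only users (e.g. blkio.time) print just "8:16 <val>".
 */
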
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples) {
			do_div(sum, samples);
		} else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}
	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

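/*
 * The strings parsed above are what user space writes into the per-device
 * cgroup files, e.g. (illustrative; 8:16 is the major:minor of sdb):
 *
 *	echo "8:16 500"     > blkio.weight_device
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * i.e. "major:minor value", where a value of 0 clears the per-device
 * setting (falling back to the group weight, or to no throttling limit).
 */
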
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

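/*
 * With the subsys named "blkio", the table above shows up in each cgroup
 * directory as files such as blkio.weight, blkio.weight_device,
 * blkio.io_service_bytes and, with CONFIG_BLK_DEV_THROTTLING,
 * blkio.throttle.read_bps_device etc.
 */
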
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup. blkgs should be
 * removed while holding both q and blkcg locks. As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

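/*
 * blkcg_bypass_start() and blkcg_bypass_end() are meant to be paired
 * around policy updates, as in blkio_policy_register()/unregister()
 * below: all queues are put into bypass mode and their non-root blkgs
 * destroyed, the policy list is changed, and then bypass is lifted.
 */
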
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);