/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
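
/*
 * Example: BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the owning policy id into the upper 16 bits of cft->private and the
 * per-policy file id into the lower 16 bits; BLKIOFILE_POLICY() and
 * BLKIOFILE_ATTR() recover the two halves when the file is accessed.
 */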

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
#endif
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
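
/*
 * Stat rows shown to user space are keyed as "<major>:<minor> <sub type>",
 * e.g. "8:16 Read" or "8:16 Total". blkio_get_key_name() below builds that
 * key; with diskname_only set it emits only the "<major>:<minor>" part.
 */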

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
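
/*
 * Per-device rules are written as "<major>:<minor> <value>", for example
 * "echo 8:16 1000 > blkio.weight_device". blkio_policy_parse_and_set()
 * splits that string, validates the device number and fills *newpn.
 */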

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp, iops;
	int i = 0;
	dev_t dev;
	u64 bps;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!strlen(p))
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoul(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
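
/*
 * Write handler for the per-device rule files: parse the rule, then insert a
 * new policy node, update the existing one, or delete it when the written
 * value is zero, and finally propagate the change to the affected groups.
 */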

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
		bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
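
/*
 * Control files exported by this controller. Each entry encodes the owning
 * policy and the attribute it serves in .private via BLKIOFILE_PRIVATE(), so
 * the shared read/write handlers above can tell the files apart.
 */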

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	if (parent != cgroup->top_cgroup)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");