block: avoid calling blkg_free() in atomic context

blkg_free() can be reached from atomic context: the last blkg reference
may be dropped from the percpu-ref release path, whose RCU callback
(__blkg_release()) must not sleep. Both the policies' ->pd_free_fn()
methods and the request queue's release handler (reached via
blk_put_queue()) may sleep, so the actual freeing is deferred to a work
item and blkg_free() merely schedules it. To make the deferred
blk_put_queue() safe, blkg_alloc() now takes its own reference on the
queue. The include hunk additionally drops <linux/genhd.h> (its
contents were folded into <linux/blkdev.h>) and switches to the private
"blk-cgroup.h" header; those are independent cleanups captured in the
same diff.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 650f7e27989f1989fa42da3f0a85d21aed221120..6ed43fc0e6ab0460f28839b88f8a8c702f4366d2 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/slab.h>
-#include <linux/genhd.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
 #include <linux/ctype.h>
-#include <linux/blk-cgroup.h>
 #include <linux/tracehook.h>
 #include <linux/psi.h>
 #include <linux/part_stat.h>
 #include "blk.h"
+#include "blk-cgroup.h"
 #include "blk-ioprio.h"
 #include "blk-throttle.h"
 
@@ -66,6 +65,23 @@ static bool blkcg_policy_enabled(struct request_queue *q,
        return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
+static void blkg_free_workfn(struct work_struct *work)
+{
+       struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+                                            free_work);
+       int i;
+
+       for (i = 0; i < BLKCG_MAX_POLS; i++)
+               if (blkg->pd[i])
+                       blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+
+       if (blkg->q)
+               blk_put_queue(blkg->q);
+       free_percpu(blkg->iostat_cpu);
+       percpu_ref_exit(&blkg->refcnt);
+       kfree(blkg);
+}
+
 /**
  * blkg_free - free a blkg
  * @blkg: blkg to free
@@ -74,18 +90,15 @@ static bool blkcg_policy_enabled(struct request_queue *q,
  */
 static void blkg_free(struct blkcg_gq *blkg)
 {
-       int i;
-
        if (!blkg)
                return;
 
-       for (i = 0; i < BLKCG_MAX_POLS; i++)
-               if (blkg->pd[i])
-                       blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
-
-       free_percpu(blkg->iostat_cpu);
-       percpu_ref_exit(&blkg->refcnt);
-       kfree(blkg);
+       /*
+        * Both ->pd_free_fn() and the request queue's release handler
+        * may sleep, so defer the actual freeing to a work item.
+        */
+       INIT_WORK(&blkg->free_work, blkg_free_workfn);
+       schedule_work(&blkg->free_work);
 }
 
 static void __blkg_release(struct rcu_head *rcu)
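
This is the heart of the fix: blkg_free() no longer tears the blkg down
inline, it only queues free_work, and blkg_free_workfn() performs the
sleepable teardown in process context. The same deferral idiom, reduced
to a minimal standalone sketch with hypothetical names (foo, foo_free;
an illustration of the pattern, not the kernel's actual code):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct foo {
            struct work_struct free_work;
            void *payload;
    };

    static void foo_free_workfn(struct work_struct *work)
    {
            struct foo *foo = container_of(work, struct foo, free_work);

            /* Process context: sleepable teardown (locks, refcounts,
             * ...) would be safe here. */
            kfree(foo->payload);
            kfree(foo);
    }

    /* May be called from atomic context. */
    static void foo_free(struct foo *foo)
    {
            INIT_WORK(&foo->free_work, foo_free_workfn);
            schedule_work(&foo->free_work);
    }

Note that, as in the patch, INIT_WORK() happens at free time rather than
at allocation time; that is safe because once the last reference is gone
no other context can touch the object concurrently.
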
@@ -168,6 +181,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        if (!blkg->iostat_cpu)
                goto err_free;
 
+       if (!blk_get_queue(q))
+               goto err_free;
+
        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        spin_lock_init(&blkg->async_bio_lock);
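
blkg_alloc() now pins the request queue before publishing blkg->q, so
that the deferred blkg_free_workfn() above can legally call
blk_put_queue() long after the allocation context is gone.
blk_get_queue() fails once the queue's refcount has already dropped to
zero, hence the err_free path. This is the usual get-unless-zero
pairing; a minimal sketch with hypothetical names (bar_get/bar_put are
assumptions, not kernel API):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct bar {
            struct kref refcnt;
    };

    static void bar_release(struct kref *ref)
    {
            kfree(container_of(ref, struct bar, refcnt));
    }

    /* Like blk_get_queue(): fails if teardown has already begun. */
    static bool bar_get(struct bar *bar)
    {
            return kref_get_unless_zero(&bar->refcnt);
    }

    static void bar_put(struct bar *bar)
    {
            kref_put(&bar->refcnt, bar_release);
    }
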
@@ -857,11 +873,11 @@ static void blkcg_fill_root_iostats(void)
                        blk_queue_root_blkg(bdev_get_queue(bdev));
                struct blkg_iostat tmp;
                int cpu;
+               unsigned long flags;
 
                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;
-                       unsigned long flags;
 
                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
@@ -877,11 +893,11 @@ static void blkcg_fill_root_iostats(void)
                                cpu_dkstats->sectors[STAT_WRITE] << 9;
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
-
-                       flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
-                       blkg_iostat_set(&blkg->iostat.cur, &tmp);
-                       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
                }
+
+               flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
+               blkg_iostat_set(&blkg->iostat.cur, &tmp);
+               u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
        }
 }
 
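The blkcg_fill_root_iostats() hunks tighten the stats publication: the
per-cpu disk counters are first summed into the local tmp, and the
result is published once per block device inside the u64_stats writer
section, instead of publishing a partial snapshot (and saving/restoring
irq flags) on every loop iteration. A minimal sketch of the writer side
of that API, with hypothetical names (struct counters is an
assumption):

    #include <linux/u64_stats_sync.h>

    struct counters {
            struct u64_stats_sync sync;
            u64 bytes;
    };

    static void counters_set(struct counters *c, u64 val)
    {
            unsigned long flags;

            /* One writer-side critical section publishes the whole
             * snapshot. */
            flags = u64_stats_update_begin_irqsave(&c->sync);
            c->bytes = val;
            u64_stats_update_end_irqrestore(&c->sync, flags);
    }

Readers pair this with u64_stats_fetch_begin()/u64_stats_fetch_retry()
to obtain a consistent snapshot on configurations where 64-bit loads
are not atomic.
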
@@ -1176,6 +1192,8 @@ int blkcg_init_queue(struct request_queue *q)
        bool preloaded;
        int ret;
 
+       INIT_LIST_HEAD(&q->blkg_list);
+
        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;
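
Lastly, q->blkg_list is now initialized at the very top of
blkcg_init_queue(), before the first allocation that can fail,
presumably so that error and teardown paths can always rely on a valid
(empty) list head. A minimal sketch of why the ordering matters, with a
hypothetical struct:

    #include <linux/list.h>

    struct queue_like {
            struct list_head blkg_list;
    };

    static void queue_like_init(struct queue_like *q)
    {
            /* Initialize before any failure point: list_empty() and
             * list_del() are only defined on an initialized head. */
            INIT_LIST_HEAD(&q->blkg_list);
    }

    static void queue_like_exit(struct queue_like *q)
    {
            /* Safe even if setup bailed out early: an initialized,
             * empty head makes this check well defined. */
            WARN_ON(!list_empty(&q->blkg_list));
    }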