blk-mq-sched: add framework for MQ capable IO schedulers
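
In the blk-cgroup.c portion shown here, blkcg_activate_policy() and blkcg_deactivate_policy() now freeze and quiesce blk-mq queues (blk_mq_freeze_queue() plus blk_mq_quiesce_queue(), undone by blk_mq_unfreeze_queue()) before changing per-queue policy data, while legacy queues continue to use blk_queue_bypass_start()/blk_queue_bypass_end().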
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8ba0af780e880e1c6ed72d772ba77dea5a79f436..2630f64bed197f549561b8f09c68143f397f3882 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1223,7 +1223,11 @@ int blkcg_activate_policy(struct request_queue *q,
        if (blkcg_policy_enabled(q, pol))
                return 0;
 
-       blk_queue_bypass_start(q);
+       if (q->mq_ops) {
+               blk_mq_freeze_queue(q);
+               blk_mq_quiesce_queue(q);
+       } else
+               blk_queue_bypass_start(q);
 pd_prealloc:
        if (!pd_prealloc) {
                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1261,7 +1265,10 @@ pd_prealloc:
 
        spin_unlock_irq(q->queue_lock);
 out_bypass_end:
-       blk_queue_bypass_end(q);
+       if (q->mq_ops)
+               blk_mq_unfreeze_queue(q);
+       else
+               blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
@@ -1284,7 +1291,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
        if (!blkcg_policy_enabled(q, pol))
                return;
 
-       blk_queue_bypass_start(q);
+       if (q->mq_ops) {
+               blk_mq_freeze_queue(q);
+               blk_mq_quiesce_queue(q);
+       } else
+               blk_queue_bypass_start(q);
+
        spin_lock_irq(q->queue_lock);
 
        __clear_bit(pol->plid, q->blkcg_pols);
@@ -1304,7 +1316,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
        }
 
        spin_unlock_irq(q->queue_lock);
-       blk_queue_bypass_end(q);
+
+       if (q->mq_ops)
+               blk_mq_unfreeze_queue(q);
+       else
+               blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
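
For reference, a minimal sketch of the drain/resume pattern these hunks introduce (not part of the patch; the helper name and the per-queue update step are hypothetical, and the blk-mq/legacy split simply mirrors the code above):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical helper illustrating the pattern used in
 * blkcg_activate_policy()/blkcg_deactivate_policy() above:
 * drain the queue before touching per-queue state, then resume.
 */
static void example_update_queue_policy(struct request_queue *q)
{
	if (q->mq_ops) {
		blk_mq_freeze_queue(q);		/* wait for in-flight requests */
		blk_mq_quiesce_queue(q);	/* block further dispatch to the driver */
	} else
		blk_queue_bypass_start(q);	/* legacy request_fn drain */

	/* ... update per-queue (e.g. blkcg policy) data here ... */

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}

Freezing waits for requests already in flight to complete and quiescing blocks further dispatch, so both are taken before the policy state is modified; the unfreeze on the way out restores normal operation, matching what the hunks above do in the activation and deactivation paths.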