blk-mq-sched: allow setting of default IO scheduler (patch series: blk-mq-sched.3)
authorJens Axboe <axboe@fb.com>
Wed, 14 Dec 2016 21:44:36 +0000 (14:44 -0700)
committerJens Axboe <axboe@fb.com>
Wed, 14 Dec 2016 21:47:20 +0000 (14:47 -0700)
Signed-off-by: Jens Axboe <axboe@fb.com>
block/Kconfig.iosched
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/elevator.c
drivers/nvme/host/pci.c
include/linux/blk-mq.h

index 490ef2850faee1f6682e112a7876c35340794502..96216cf185604cfb7f2e94b2d4525c89415532a5 100644 (file)
@@ -32,12 +32,6 @@ config IOSCHED_CFQ
 
          This is the default I/O scheduler.
 
-config MQ_IOSCHED_DEADLINE
-       tristate "MQ deadline I/O scheduler"
-       default y
-       ---help---
-         MQ version of the deadline IO scheduler.
-
 config CFQ_GROUP_IOSCHED
        bool "CFQ Group Scheduling support"
        depends on IOSCHED_CFQ && BLK_CGROUP
@@ -69,6 +63,43 @@ config DEFAULT_IOSCHED
        default "cfq" if DEFAULT_CFQ
        default "noop" if DEFAULT_NOOP
 
+config MQ_IOSCHED_DEADLINE
+       tristate "MQ deadline I/O scheduler"
+       default y
+       ---help---
+         MQ version of the deadline IO scheduler.
+
+config MQ_IOSCHED_NONE
+       bool
+       default y
+
+choice
+       prompt "Default MQ I/O scheduler"
+       default MQ_IOSCHED_NONE
+       help
+         Select the I/O scheduler which will be used by default for all
+         blk-mq managed block devices.
+
+       config DEFAULT_MQ_DEADLINE
+               bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+       config DEFAULT_MQ_NONE
+               bool "None"
+
+endchoice
+
+config DEFAULT_MQ_IOSCHED
+       string
+       default "mq-deadline" if DEFAULT_MQ_DEADLINE
+       default "none" if DEFAULT_MQ_NONE
+
+config MQ_IOSCHED_ONLY_SQ
+       bool "Enable blk-mq IO scheduler only for single queue devices"
+       default y
+       help
+         Say Y here, if you only want to enable IO scheduling on block
+         devices that have a single queue registered.
+
 endmenu
 
 endif
index 02ad1725866637acaf7c53544d1f7fa3bcd54ed2..606d519b42ee8205227b3ba11abcfdcb9941c7fe 100644 (file)
@@ -373,3 +373,22 @@ void blk_mq_sched_request_inserted(struct request *rq)
        trace_block_rq_insert(rq->q, rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
+
+int blk_mq_sched_init(struct request_queue *q)
+{
+       int ret;
+
+#if defined(CONFIG_DEFAULT_MQ_NONE)
+       return 0;
+#endif
+#if defined(CONFIG_MQ_IOSCHED_ONLY_SQ)
+       if (q->nr_hw_queues > 1)
+               return 0;
+#endif
+
+       mutex_lock(&q->sysfs_lock);
+       ret = elevator_init(q, NULL);
+       mutex_unlock(&q->sysfs_lock);
+
+       return ret;
+}
index b68dccc0190e964509fa7a7f0530ae51d4be0242..e398412d3fcf686a4c01f74c9a2a62d979aad6ff 100644 (file)
@@ -28,6 +28,8 @@ void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 
+int blk_mq_sched_init(struct request_queue *q);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
 static inline bool
index d10a246a3bc7cfde948dd3b43acb13db256a8d9e..48c28e1cb42a0c89d8836abb480f9ce197f559f1 100644 (file)
@@ -2101,6 +2101,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);
 
+       if (!(set->flags & BLK_MQ_F_NO_SCHED))
+               blk_mq_sched_init(q);
+
        if (q->nr_hw_queues > 1)
                blk_queue_make_request(q, blk_mq_make_request);
        else
index 6d39197768c1402b4f3d8d4e0d89cfea8d4dd788..7ad906689833f4e775d7cce7e0bfb4d51f00effe 100644 (file)
@@ -219,7 +219,10 @@ int elevator_init(struct request_queue *q, char *name)
        }
 
        if (!e) {
-               e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
+               if (q->mq_ops)
+                       e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
+               else
+                       e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
index d6e6bce93d0c9e5627266ec9f41c9cb7795d2463..063410d9b3ccd981fdbd429f57880414a1e3939b 100644 (file)
@@ -1188,6 +1188,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
                dev->admin_tagset.numa_node = dev_to_node(dev->dev);
                dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
+               dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
                dev->admin_tagset.driver_data = dev;
 
                if (blk_mq_alloc_tag_set(&dev->admin_tagset))
index 73b58b5be6e04ea044a40f22487c041396316699..5fffccf3b95fb2e31308b48d6ee336b3df654c5d 100644 (file)
@@ -152,6 +152,7 @@ enum {
        BLK_MQ_F_SG_MERGE       = 1 << 2,
        BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
        BLK_MQ_F_BLOCKING       = 1 << 5,
+       BLK_MQ_F_NO_SCHED       = 1 << 6,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,