nvme: make fabrics command run on a separate request queue
authorSagi Grimberg <sagi@grimberg.me>
Sat, 3 Aug 2019 02:33:59 +0000 (19:33 -0700)
committerSagi Grimberg <sagi@grimberg.me>
Thu, 29 Aug 2019 19:55:03 +0000 (12:55 -0700)
We have a fundamental issue that fabric commands use the admin_q.
The reason is that admin-connect, register reads and writes, and
admin commands cannot be guaranteed ordering while we are running
controller resets.

For example, when we reset a controller we perform:
1. disable the controller
2. teardown the admin queue
3. re-establish the admin queue
4. enable the controller

In order to perform (3), we need to unquiesce the admin queue, however
we may have some admin commands that are already pending on the
quiesced admin_q and will immediately execute when we unquiesce it before
we execute (4). The host must not send admin commands to the controller
before enabling the controller.

To fix this, we have the fabric commands (admin connect and property
get/set, but not I/O queue connect) use a separate fabrics_q and make
sure to quiesce the admin_q before we disable the controller, and
unquiesce it only after we enable the controller.

This fixes the error prints from nvmet in a controller reset storm test:
kernel: nvmet: got cmd 6 while CC.EN == 0 on qid = 0
This indicates that the host is sending an admin command while the
controller is not enabled.

Reviewed-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index 854ce75e6c2ddac9e23e2f9eb98d77224f6a1708..145c210edb03ef01dde2008e1485b3c9fdb9a6d9 100644 (file)
@@ -150,7 +150,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
        cmd.prop_get.fctype = nvme_fabrics_type_property_get;
        cmd.prop_get.offset = cpu_to_le32(off);
 
-       ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
+       ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0, false);
 
        if (ret >= 0)
@@ -197,7 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
        cmd.prop_get.attrib = 1;
        cmd.prop_get.offset = cpu_to_le32(off);
 
-       ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
+       ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0, false);
 
        if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
        cmd.prop_set.offset = cpu_to_le32(off);
        cmd.prop_set.value = cpu_to_le64(val);
 
-       ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
+       ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0, false);
        if (unlikely(ret))
                dev_err(ctrl->device,
@@ -396,7 +396,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-       ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
+       ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
                        data, sizeof(*data), 0, NVME_QID_ANY, 1,
                        BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
        if (ret) {
index ec264b2e54c337d043137bc3c8ab80a9e16e7995..49577a33d25bb903702a10bc44a19b0164e8f37d 100644 (file)
@@ -2006,6 +2006,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
+       blk_cleanup_queue(ctrl->ctrl.fabrics_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
 
        kfree(ctrl->queues);
@@ -2633,8 +2634,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_delete_hw_queue;
 
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
-
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
                goto out_disconnect_admin_queue;
@@ -2655,6 +2654,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        ctrl->ctrl.max_hw_sectors =
                (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
 
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
        ret = nvme_init_identify(&ctrl->ctrl);
        if (ret)
                goto out_disconnect_admin_queue;
@@ -3101,10 +3102,16 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                goto out_free_queues;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
 
+       ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+       if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+               ret = PTR_ERR(ctrl->ctrl.fabrics_q);
+               goto out_free_admin_tag_set;
+       }
+
        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                ret = PTR_ERR(ctrl->ctrl.admin_q);
-               goto out_free_admin_tag_set;
+               goto out_cleanup_fabrics_q;
        }
 
        /*
@@ -3176,6 +3183,8 @@ fail_ctrl:
 
 out_cleanup_admin_q:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+       blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 out_free_admin_tag_set:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_queues:
index 624c3ea2134c43826627c33d4b943a31386f45d3..a818313a1f15c87d1bccf5f18c8be4f867a20a42 100644 (file)
@@ -181,6 +181,7 @@ struct nvme_ctrl {
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
+       struct request_queue *fabrics_q;
        struct device *dev;
        int instance;
        int numa_node;
index 5143e2a5d54c5614d22e5afa711d33b5029aab80..0ef05a75c428f6aab949928061db54e20dcc2a89 100644 (file)
@@ -751,6 +751,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
+               blk_cleanup_queue(ctrl->ctrl.fabrics_q);
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
        }
        if (ctrl->async_event_sqe.data) {
@@ -792,10 +793,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        goto out_free_async_qe;
                }
 
+               ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+               if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+                       error = PTR_ERR(ctrl->ctrl.fabrics_q);
+                       goto out_free_tagset;
+               }
+
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
                if (IS_ERR(ctrl->ctrl.admin_q)) {
                        error = PTR_ERR(ctrl->ctrl.admin_q);
-                       goto out_free_tagset;
+                       goto out_cleanup_fabrics_q;
                }
        }
 
@@ -810,6 +817,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        ctrl->ctrl.max_hw_sectors =
                (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_stop_queue;
@@ -821,6 +830,9 @@ out_stop_queue:
 out_cleanup_queue:
        if (new)
                blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -895,7 +907,8 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        nvme_cancel_request, &ctrl->ctrl);
                blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
        }
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       if (remove)
+               blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
 
@@ -1046,6 +1059,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        nvme_rdma_teardown_io_queues(ctrl, false);
        nvme_start_queues(&ctrl->ctrl);
        nvme_rdma_teardown_admin_queue(ctrl, false);
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we're in DELETING state */
@@ -1858,6 +1872,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
        cancel_delayed_work_sync(&ctrl->reconnect_work);
 
        nvme_rdma_teardown_io_queues(ctrl, shutdown);
+       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        if (shutdown)
                nvme_shutdown_ctrl(&ctrl->ctrl);
        else
index a9c3f28eedd71c80cf2ad3e2d6dd7aed4fa15931..2d8ba31cb691ca51d7afff381a03fb612d2426f7 100644 (file)
@@ -1703,6 +1703,7 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
        nvme_tcp_stop_queue(ctrl, 0);
        if (remove) {
                blk_cleanup_queue(ctrl->admin_q);
+               blk_cleanup_queue(ctrl->fabrics_q);
                blk_mq_free_tag_set(ctrl->admin_tagset);
        }
        nvme_tcp_free_admin_queue(ctrl);
@@ -1723,10 +1724,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
                        goto out_free_queue;
                }
 
+               ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
+               if (IS_ERR(ctrl->fabrics_q)) {
+                       error = PTR_ERR(ctrl->fabrics_q);
+                       goto out_free_tagset;
+               }
+
                ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
                if (IS_ERR(ctrl->admin_q)) {
                        error = PTR_ERR(ctrl->admin_q);
-                       goto out_free_tagset;
+                       goto out_cleanup_fabrics_q;
                }
        }
 
@@ -1738,6 +1745,8 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
        if (error)
                goto out_stop_queue;
 
+       blk_mq_unquiesce_queue(ctrl->admin_q);
+
        error = nvme_init_identify(ctrl);
        if (error)
                goto out_stop_queue;
@@ -1749,6 +1758,9 @@ out_stop_queue:
 out_cleanup_queue:
        if (new)
                blk_cleanup_queue(ctrl->admin_q);
+out_cleanup_fabrics_q:
+       if (new)
+               blk_cleanup_queue(ctrl->fabrics_q);
 out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -1767,7 +1779,8 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
                        nvme_cancel_request, ctrl);
                blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
        }
-       blk_mq_unquiesce_queue(ctrl->admin_q);
+       if (remove)
+               blk_mq_unquiesce_queue(ctrl->admin_q);
        nvme_tcp_destroy_admin_queue(ctrl, remove);
 }
 
@@ -1894,6 +1907,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
        /* unquiesce to fail fast pending requests */
        nvme_start_queues(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, false);
+       blk_mq_unquiesce_queue(ctrl->admin_q);
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we're in DELETING state */
@@ -1910,6 +1924,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
        cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
 
        nvme_tcp_teardown_io_queues(ctrl, shutdown);
+       blk_mq_quiesce_queue(ctrl->admin_q);
        if (shutdown)
                nvme_shutdown_ctrl(ctrl);
        else
index ec0bc57d26fcfd49ecf1c0af9e3c8944e1f02f9b..9ee093b9fc74e6181d9f47c48311fb6eb2cf5f8a 100644 (file)
@@ -253,6 +253,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
        clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
+       blk_cleanup_queue(ctrl->ctrl.fabrics_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
 }
 
@@ -357,10 +358,16 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
                goto out_free_sq;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
 
+       ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+       if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+               error = PTR_ERR(ctrl->ctrl.fabrics_q);
+               goto out_free_tagset;
+       }
+
        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
-               goto out_free_tagset;
+               goto out_cleanup_fabrics_q;
        }
 
        error = nvmf_connect_admin_queue(&ctrl->ctrl);
@@ -376,6 +383,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
 
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;
@@ -384,6 +393,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+       blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_sq:
@@ -401,14 +412,13 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
                nvme_loop_destroy_io_queues(ctrl);
        }
 
+       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
-       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_loop_destroy_admin_queue(ctrl);
 }