nvmet: Improve nvmet_alloc_ctrl() interface and implementation
author Damien Le Moal <dlemoal@kernel.org>
Sat, 4 Jan 2025 04:59:40 +0000 (13:59 +0900)
committer Keith Busch <kbusch@kernel.org>
Sat, 11 Jan 2025 03:30:47 +0000 (19:30 -0800)
Introduce struct nvmet_alloc_ctrl_args to define the arguments of
nvmet_alloc_ctrl() and avoid the need to pass a pointer to a struct
nvmet_req to that function. This new data structure aggregates the
arguments previously passed to nvmet_alloc_ctrl() (subsysnqn, hostnqn,
hostid and kato) with the struct nvmet_req fields that the function
used, that is, the port, p2p_client and ops fields as input and the
result and error_loc fields as output, together with a status field.
nvmet_alloc_ctrl() is also changed to return a pointer to the allocated
and initialized controller structure instead of a status code, as the
status is now reported through the status field of
struct nvmet_alloc_ctrl_args.
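
With the new interface, a caller fills in the input fields of the
argument structure, calls nvmet_alloc_ctrl() and, if NULL is returned,
reports the failure using the status, result and error_loc output
fields. A minimal sketch of the calling pattern (illustration only;
port, subsysnqn, hostnqn, ops and kato stand for whatever values the
caller has at hand):

	struct nvmet_alloc_ctrl_args args = {
		.port = port,			/* target port */
		.subsysnqn = subsysnqn,		/* subsystem NQN */
		.hostnqn = hostnqn,		/* host NQN */
		.ops = ops,			/* transport fabrics ops */
		.kato = kato,			/* keep-alive timeout in ms */
	};
	struct nvmet_ctrl *ctrl;

	ctrl = nvmet_alloc_ctrl(&args);
	if (!ctrl)
		return args.status;	/* NVMe status code set by the function */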

The function nvmet_setup_p2p_ns_map() is changed to directly take the
p2p_client device pointer it needs as an argument instead of a pointer
to a struct nvmet_req.

The code in nvmet_execute_admin_connect() that initializes a newly
allocated target controller is moved into nvmet_alloc_ctrl(). The code
that sets up the admin queue for the controller (the call to
nvmet_install_queue()) remains in nvmet_execute_admin_connect().

Finally, nvmet_alloc_ctrl() is also exported so that target drivers can
use it directly to allocate and initialize a new controller structure
without relying on a fabrics connect command request, as sketched
below.

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Rick Wertenbroek <rick.wertenbroek@gmail.com>
Tested-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/nvmet.h

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 4b5594549ae62764c6336fb264a7a94c164813be..4909f3e5a55252a98b3733a9b84da6bf4e548ccb 100644
@@ -1350,15 +1350,15 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
  * Note: ctrl->subsys->lock should be held when calling this function
  */
 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
-               struct nvmet_req *req)
+               struct device *p2p_client)
 {
        struct nvmet_ns *ns;
        unsigned long idx;
 
-       if (!req->p2p_client)
+       if (!p2p_client)
                return;
 
-       ctrl->p2p_client = get_device(req->p2p_client);
+       ctrl->p2p_client = get_device(p2p_client);
 
        xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -1387,45 +1387,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
        ctrl->ops->delete_ctrl(ctrl);
 }
 
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-               struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
-               uuid_t *hostid)
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
 {
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
+       u32 kato = args->kato;
+       u8 dhchap_status;
        int ret;
-       u16 status;
 
-       status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
-       subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+       args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+       subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
-                       subsysnqn);
-               req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
-               req->error_loc = offsetof(struct nvme_common_command, dptr);
-               goto out;
+                       args->subsysnqn);
+               args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+               args->error_loc = offsetof(struct nvme_common_command, dptr);
+               return NULL;
        }
 
        down_read(&nvmet_config_sem);
-       if (!nvmet_host_allowed(subsys, hostnqn)) {
+       if (!nvmet_host_allowed(subsys, args->hostnqn)) {
                pr_info("connect by host %s for subsystem %s not allowed\n",
-                       hostnqn, subsysnqn);
-               req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+                       args->hostnqn, args->subsysnqn);
+               args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
                up_read(&nvmet_config_sem);
-               status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
-               req->error_loc = offsetof(struct nvme_common_command, dptr);
+               args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+               args->error_loc = offsetof(struct nvme_common_command, dptr);
                goto out_put_subsystem;
        }
        up_read(&nvmet_config_sem);
 
-       status = NVME_SC_INTERNAL;
+       args->status = NVME_SC_INTERNAL;
        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                goto out_put_subsystem;
        mutex_init(&ctrl->lock);
 
-       ctrl->port = req->port;
-       ctrl->ops = req->ops;
+       ctrl->port = args->port;
+       ctrl->ops = args->ops;
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
        /* By default, set loop targets to clear IDS by default */
@@ -1439,8 +1438,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
        INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
-       memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
-       memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+       memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
+       memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
 
        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
@@ -1463,12 +1462,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
                             subsys->cntlid_min, subsys->cntlid_max,
                             GFP_KERNEL);
        if (ret < 0) {
-               status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+               args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
                goto out_free_sqs;
        }
        ctrl->cntlid = ret;
 
-       uuid_copy(&ctrl->hostid, hostid);
-
        /*
         * Discovery controllers may use some arbitrary high value
@@ -1490,12 +1489,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        if (ret)
                goto init_pr_fail;
        list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
-       nvmet_setup_p2p_ns_map(ctrl, req);
+       nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
        nvmet_debugfs_ctrl_setup(ctrl);
        mutex_unlock(&subsys->lock);
 
-       *ctrlp = ctrl;
-       return 0;
+       if (args->hostid)
+               uuid_copy(&ctrl->hostid, args->hostid);
+
+       dhchap_status = nvmet_setup_auth(ctrl);
+       if (dhchap_status) {
+               pr_err("Failed to setup authentication, dhchap status %u\n",
+                      dhchap_status);
+               nvmet_ctrl_put(ctrl);
+               if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+                       args->status =
+                               NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+               else
+                       args->status = NVME_SC_INTERNAL;
+               return NULL;
+       }
+
+       args->status = NVME_SC_SUCCESS;
+
+       pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+               nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+               ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+               ctrl->pi_support ? " T10-PI is enabled" : "",
+               nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+
+       return ctrl;
 
 init_pr_fail:
        mutex_unlock(&subsys->lock);
@@ -1509,9 +1531,9 @@ out_free_ctrl:
        kfree(ctrl);
 out_put_subsystem:
        nvmet_subsys_put(subsys);
-out:
-       return status;
+       return NULL;
 }
+EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
 
 static void nvmet_ctrl_free(struct kref *ref)
 {
@@ -1547,6 +1569,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
 {
        kref_put(&ctrl->ref, nvmet_ctrl_free);
 }
+EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c49904ebb6c2d7ae36cd304f8fa95485dc4a37f6..8dbd7df8c9a0d82a9163fe41978b209f841c7157 100644
@@ -213,73 +213,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
-       u16 status;
-       u8 dhchap_status;
+       struct nvmet_alloc_ctrl_args args = {
+               .port = req->port,
+               .ops = req->ops,
+               .p2p_client = req->p2p_client,
+               .kato = le32_to_cpu(c->kato),
+       };
 
        if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
                return;
 
        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
-               status = NVME_SC_INTERNAL;
+               args.status = NVME_SC_INTERNAL;
                goto complete;
        }
 
-       status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
-       if (status)
+       args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+       if (args.status)
                goto out;
 
        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
-               req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
-               status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
+               args.error_loc = offsetof(struct nvmf_connect_command, recfmt);
+               args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
                goto out;
        }
 
        if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
                pr_warn("connect attempt for invalid controller ID %#x\n",
                        d->cntlid);
-               status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
-               req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+               args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+               args.result = IPO_IATTR_CONNECT_DATA(cntlid);
                goto out;
        }
 
        d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
        d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
-       status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
-                                 le32_to_cpu(c->kato), &ctrl, &d->hostid);
-       if (status)
-               goto out;
 
-       dhchap_status = nvmet_setup_auth(ctrl);
-       if (dhchap_status) {
-               pr_err("Failed to setup authentication, dhchap status %u\n",
-                      dhchap_status);
-               nvmet_ctrl_put(ctrl);
-               if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
-                       status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
-               else
-                       status = NVME_SC_INTERNAL;
+       args.subsysnqn = d->subsysnqn;
+       args.hostnqn = d->hostnqn;
+       args.hostid = &d->hostid;
+       args.kato = c->kato;
+
+       ctrl = nvmet_alloc_ctrl(&args);
+       if (!ctrl)
                goto out;
-       }
 
-       status = nvmet_install_queue(ctrl, req);
-       if (status) {
+       args.status = nvmet_install_queue(ctrl, req);
+       if (args.status) {
                nvmet_ctrl_put(ctrl);
                goto out;
        }
 
-       pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
-               nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
-               ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
-               ctrl->pi_support ? " T10-PI is enabled" : "",
-               nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
-       req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+       args.result = cpu_to_le32(nvmet_connect_result(ctrl));
 out:
        kfree(d);
 complete:
-       nvmet_req_complete(req, status);
+       req->error_loc = args.error_loc;
+       req->cqe->result.u32 = args.result;
+       nvmet_req_complete(req, args.status);
 }
 
 static void nvmet_execute_io_connect(struct nvmet_req *req)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 4dad413e5fef04881f2177abb5adb96bdcc08028..ed7e8cd890e4bc25a9f135468991f37b4731a042 100644
@@ -549,9 +549,21 @@ int nvmet_sq_init(struct nvmet_sq *sq);
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-               struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
-               uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+       struct nvmet_port       *port;
+       char                    *subsysnqn;
+       char                    *hostnqn;
+       uuid_t                  *hostid;
+       const struct nvmet_fabrics_ops *ops;
+       struct device           *p2p_client;
+       u32                     kato;
+       u32                     result;
+       u16                     error_loc;
+       u16                     status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
                                       const char *hostnqn, u16 cntlid,
                                       struct nvmet_req *req);