// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * held; when reading (populating the discovery log page or checking a
 * host-subsystem link), the read lock is held to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
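/*
 * Illustrative sketch (not part of the driver) of the locking convention
 * described above: configuration readers take the semaphore shared, writers
 * take it exclusive.  The bodies shown are placeholders.
 *
 *	down_read(&nvmet_config_sem);
 *	// walk port->subsystems or a subsystem's allowed-hosts list
 *	up_read(&nvmet_config_sem);
 *
 *	down_write(&nvmet_config_sem);
 *	// add or remove a subsystem/host link, change allow_any_host
 *	up_write(&nvmet_config_sem);
 */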
u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
DECLARE_RWSEM(nvmet_ana_sem);
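/*
 * Translate a Linux errno reported by the backend into an NVMe status code
 * and point req->error_loc at the command field most likely responsible.
 */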
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
	req->error_loc = offsetof(struct nvme_rw_command, length);
	status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	req->error_loc = offsetof(struct nvme_rw_command, slba);
	status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	switch (req->cmd->common.opcode) {
	case nvme_cmd_write_zeroes:
		status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	req->error_loc = offsetof(struct nvme_rw_command, nsid);
	status = NVME_SC_ACCESS_DENIED;
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	status = NVME_SC_INTERNAL | NVME_SC_DNR;

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
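/*
 * Helpers that copy data between a driver buffer and the request's
 * scatter-gather list; a short copy is reported as an SGL data error.
 */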
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
	if (list_empty(&subsys->namespaces))
	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
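/*
 * Complete any Asynchronous Event Request commands still outstanding on a
 * controller with an internal error status, typically while tearing down
 * the admin queue.
 */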
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	if (!ctrl->nr_async_event_cmds) {
		mutex_unlock(&ctrl->lock);
	req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
	mutex_unlock(&ctrl->lock);
	nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);

static void nvmet_async_event_work(struct work_struct *work)
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	aen = list_first_entry_or_null(&ctrl->async_events,
			struct nvmet_async_event, entry);
	if (!aen || !ctrl->nr_async_event_cmds) {
		mutex_unlock(&ctrl->lock);
	req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
	nvmet_set_result(req, nvmet_async_event_result(aen));
	list_del(&aen->entry);
	mutex_unlock(&ctrl->lock);
	nvmet_req_complete(req, 0);

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
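/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * If the list is already full, it collapses to the single 0xffffffff entry
 * that indicates more namespaces changed than can be reported.
 */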
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
	mutex_unlock(&ctrl->lock);

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	mutex_unlock(&subsys->lock);

void nvmet_port_send_ana_event(struct nvmet_port *port)
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
	nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
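/*
 * Bind a configured port to its fabrics transport.  If the transport module
 * is not loaded yet, drop the config lock, request it, and look the
 * transport up again before giving up.
 */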
int nvmet_enable_port(struct nvmet_port *port)
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);

	if (!try_module_get(ops->owner))

	ret = ops->add_port(port);
		module_put(ops->owner);

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;

void nvmet_disable_port(struct nvmet_port *port)
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
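/*
 * Traffic-based keep-alive: if the controller has seen any command since
 * the last expiry, treat that as proof of liveness and re-arm the timer
 * instead of declaring a fatal error.
 */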
static void nvmet_keep_alive_timer(struct work_struct *work)
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
	ns = __nvmet_find_namespace(ctrl, nsid);
		percpu_ref_get(&ns->ref);

static void nvmet_destroy_namespace(struct percpu_ref *ref)
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);

void nvmet_put_namespace(struct nvmet_ns *ns)
	percpu_ref_put(&ns->ref);

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
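/*
 * Validate that a namespace marked for peer-to-peer memory use can actually
 * support it: it must be backed by a block device whose driver advertises
 * P2P DMA, and either the configured p2p_dev must be reachable or some
 * p2pmem provider must be found.
 */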
static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
	struct pci_dev *p2p_dev;

		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",

		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);

	/*
	 * Right now we just check that there is p2pmem available so
	 * we can report an error to the user right away if there
	 * is not. We'll find the actual device to use once we
	 * setup the controller when the port's device is available.
	 */
	p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		pr_err("no peer-to-peer memory is available for %s\n",

	pci_dev_put(p2p_dev);

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
	struct device *clients[2];
	struct pci_dev *p2p_dev;

	if (!ctrl->p2p_client || !ns->use_p2pmem)

		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);

		p2p_dev = pci_dev_get(ns->p2p_dev);

		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
				dev_name(ctrl->p2p_client), ns->device_path);

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
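/*
 * Make a namespace visible to hosts: attach the block-device or file
 * backend, wire up optional P2P memory for every controller on the
 * subsystem, and insert the namespace into the subsystem's sorted list
 * before announcing the change.
 */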
int nvmet_ns_enable(struct nvmet_ns *ns)
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);

	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)

	ret = nvmet_bdev_ns_enable(ns);
		ret = nvmet_file_ns_enable(ns);

	ret = nvmet_p2pmem_ns_enable(ns);
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the implementation
	 * of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);

	mutex_unlock(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
	nvmet_ns_dev_disable(ns);

void nvmet_ns_disable(struct nvmet_ns *ns)
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);

	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);

	mutex_unlock(&subsys->lock);

void nvmet_ns_free(struct nvmet_ns *ns)
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
	ns = kzalloc(sizeof(*ns), GFP_KERNEL);

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	ns->buffered_io = false;
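/*
 * Advance the submission queue head pointer with a lockless cmpxchg loop so
 * that concurrent completions cannot lose an update.
 */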
static void nvmet_update_sq_head(struct nvmet_req *req)
	u32 old_sqhd, new_sqhd;

		old_sqhd = req->sq->sqhd;
		new_sqhd = (old_sqhd + 1) % req->sq->size;
	} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=

	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
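/*
 * Record a failed command: set the CQE status field, append an entry to the
 * controller's error log slots, and set the "more" bit in the status so the
 * host knows additional error information is available.
 */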
static void nvmet_set_error(struct nvmet_req *req, u16 status)
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)

	spin_lock_irqsave(&ctrl->error_lock, flags);
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
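/*
 * Fill in the completion queue entry (SQ head, SQ id, command id), log any
 * error status, drop the namespace reference, and hand the response back to
 * the transport.
 */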
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);

void nvmet_req_complete(struct nvmet_req *req, u16 status)
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,

static void nvmet_confirm_sq(struct percpu_ref *ref)
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);

void nvmet_sq_destroy(struct nvmet_sq *sq)
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);

int nvmet_sq_init(struct nvmet_sq *sq)
	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
		pr_err("percpu_ref init failed!\n");
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
			return NVME_SC_NS_WRITE_PROTECTED;
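/*
 * Common checks for every I/O command: controller state, namespace lookup,
 * ANA state and write protection, before handing off to the file or
 * block-device backend parser.
 */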
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
	struct nvme_command *cmd = req->cmd;

	ret = nvmet_check_ctrl_status(req, cmd);

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	ret = nvmet_check_ana_state(req->port, req->ns);
		req->error_loc = offsetof(struct nvme_common_command, nsid);
	ret = nvmet_io_cmd_check_access(req);
		req->error_loc = offsetof(struct nvme_common_command, nsid);

		return nvmet_file_parse_io_cmd(req);

	return nvmet_bdev_parse_io_cmd(req);
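/*
 * Initialize a request as it arrives from the transport: reject fused
 * commands and unsupported SGL descriptors, then dispatch to the connect,
 * I/O, fabrics, discovery or admin parser depending on the queue and
 * controller state.
 */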
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
	u8 flags = req->cmd->common.flags;

	req->transfer_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->error_loc = NVMET_NO_ERROR_LOC;

	trace_nvmet_req_init(req, req->cmd);

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (nvme_is_fabrics(req->cmd))
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
		status = nvmet_parse_admin_cmd(req);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		sq->ctrl->cmd_seen = true;

	__nvmet_req_complete(req, status);
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
	percpu_ref_put(&req->sq->ref);
	nvmet_put_namespace(req->ns);
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

void nvmet_req_execute(struct nvmet_req *req)
	if (unlikely(req->data_len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
EXPORT_SYMBOL_GPL(nvmet_req_execute);
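/*
 * Allocate the data scatterlist for a request, preferring peer-to-peer
 * memory when the controller has a P2P device mapped for this namespace on
 * an I/O queue, and falling back to regular host memory otherwise.
 */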
int nvmet_req_alloc_sgl(struct nvmet_req *req)
	struct pci_dev *p2p_dev = NULL;

	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
		if (req->sq->ctrl && req->ns)
			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,

		if (req->sq->qid && p2p_dev) {
			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
				req->p2p_dev = p2p_dev;

		/*
		 * If no P2P memory was available we fallback to using
		 */

	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);

void nvmet_req_free_sgl(struct nvmet_req *req)
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);

static inline bool nvmet_cc_en(u32 cc)
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;

static inline u8 nvmet_cc_css(u32 cc)
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;

static inline u8 nvmet_cc_mps(u32 cc)
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;

static inline u8 nvmet_cc_ams(u32 cc)
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;

static inline u8 nvmet_cc_shn(u32 cc)
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;

static inline u8 nvmet_cc_iosqes(u32 cc)
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;

static inline u8 nvmet_cc_iocqes(u32 cc)
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
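/*
 * Validate the controller configuration written by the host before
 * reporting ready: queue entry sizes, page size, arbitration and command
 * set selection must all match what this target supports, otherwise raise
 * the controller fatal status bit.
 */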
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller. Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
	mutex_lock(&ctrl->lock);

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;

	mutex_unlock(&ctrl->lock);

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
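/*
 * Look up an existing controller on a subsystem by controller ID for a
 * Connect request, verify that the host NQN matches, and take a reference
 * on the controller that was found.
 */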
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
		pr_warn("connect request for invalid subsystem %s!\n",
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
			if (!kref_get_unless_zero(&ctrl->ref))

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
	struct nvmet_ns *ns;

	if (!req->p2p_client)

	ctrl->p2p_client = get_device(req->p2p_client);

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
	struct radix_tree_iter iter;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);

static void nvmet_fatal_error_handler(struct work_struct *work)
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
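/*
 * Create a controller for an admin-queue connect: check that the host is
 * allowed to reach the subsystem, allocate the queue arrays, the changed
 * namespace log and a controller ID, set up P2P mappings, and start the
 * keep-alive timer.
 */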
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
		pr_warn("connect request for invalid subsystem %s!\n",
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
	nvmet_subsys_put(subsys);

static void nvmet_ctrl_free(struct kref *ref)
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->changed_ns_list);

	nvmet_subsys_put(subsys);

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
	kref_put(&ctrl->ref, nvmet_ctrl_free);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	mutex_unlock(&ctrl->lock);
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
	struct nvmet_subsys_link *p;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
		return nvmet_disc_subsys;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
			if (!kref_get_unless_zero(&p->subsys->ref))
			up_read(&nvmet_config_sem);
	up_read(&nvmet_config_sem);
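/*
 * Allocate a subsystem of the given type, assign the default NVMe version
 * and a random serial number (our controllers are ephemeral), and
 * initialize its namespace, controller and host lists.
 */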
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

		subsys->max_qid = NVMET_NR_QUEUES;
		subsys->max_qid = 0;
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		return ERR_PTR(-EINVAL);
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
	if (!subsys->subsysnqn) {
		return ERR_PTR(-ENOMEM);

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

static void nvmet_subsys_free(struct kref *ref)
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);

void nvmet_subsys_put(struct nvmet_subsys *subsys)
	kref_put(&subsys->ref, nvmet_subsys_free);

static int __init nvmet_init(void)
	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
	if (!buffered_io_wq) {

	error = nvmet_init_discovery();
		goto out_free_work_queue;

	error = nvmet_init_configfs();
		goto out_exit_discovery;

	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);

static void __exit nvmet_exit(void)
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");