// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure occurs.
						 */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};
struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;
static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */
		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of an NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}
	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
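
/*
 * Example (illustrative sketch, not part of this driver): how an LLDD
 * would typically call nvme_fc_register_localport() for one of its HBA
 * ports. The WWNs, the example_* names, and the template contents are
 * placeholders; a real LLDD fills the template with its own entrypoints
 * (ls_req, fcp_io, aborts, delete callbacks) and queue/sgl limits.
 */
#if 0
static struct nvme_fc_port_template example_fc_transport_template = {
	/* ... LLDD entrypoints and operational limits filled in here ... */
};

static int example_lldd_nvme_register(struct example_hba *hba)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = hba->wwnn,		/* e.g. 0x200000109b1234ff */
		.port_name = hba->wwpn,		/* e.g. 0x100000109b1234ff */
		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
		.port_id   = hba->d_id,		/* 24-bit FC address */
	};

	/* hba->dev is the physical device used for DMA mappings */
	return nvme_fc_register_localport(&pinfo,
			&example_fc_transport_template,
			hba->dev, &hba->nvme_localport);
}
#endif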
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * updated, e.g. either
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
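
/*
 * For illustration (the WWNs below are arbitrary examples), a discovery
 * uevent carries environment strings of the form:
 *	FC_EVENT=nvmediscovery
 *	NVMEFC_HOST_TRADDR=nn-0x200000109b1234ff:pn-0x100000109b1234ff
 *	NVMEFC_TRADDR=nn-0x201700a09890f5bf:pn-0x201900a09890f5bf
 * which udev rules/scripts (e.g. nvme-cli's FC autoconnect support) can
 * turn into a "nvme connect-all" against the discovery controller.
 */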
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_free(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}
static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (nvme_ctrl_state(&ctrl->ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of an NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}
	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
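
/*
 * Example (illustrative sketch, not part of this driver): after fabric
 * discovery reports an NVMe-capable target port, the LLDD registers it
 * against the previously registered localport. All example_* names are
 * placeholders.
 */
#if 0
static int example_lldd_new_target(struct example_hba *hba,
		struct example_fabric_node *node)
{
	struct nvme_fc_port_info pinfo = {
		.node_name    = node->wwnn,
		.port_name    = node->wwpn,
		.port_role    = FC_PORT_ROLE_NVME_TARGET |
				FC_PORT_ROLE_NVME_DISCOVERY,
		.port_id      = node->d_id,
		.dev_loss_tmo = 0,	/* 0 here means "use the default" */
	};

	/* on success a discovery uevent is raised for the new rport */
	return nvme_fc_register_remoteport(hba->nvme_localport, &pinfo,
			&node->nvme_remoteport);
}
#endif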
static void
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (nvme_ctrl_state(&ctrl->ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
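
/*
 * Illustrative usage: an LLDD propagating a user-configured timeout
 * (e.g. from its own sysfs attribute) would simply call:
 *
 *	nvme_fc_set_remoteport_devloss(remoteport, 30);	 // 30 seconds
 *
 * A value of 0 requests immediate dev-loss handling on disconnect.
 */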
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
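
/*
 * Illustrative behavior of the wrappers with a NULL dev (the fcloop
 * case): all calls degenerate to no-ops, e.g.
 *
 *	dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// a == 0, and fc_dma_mapping_error(NULL, a) returns 0 (no error)
 *	fc_dma_unmap_single(NULL, a, len, DMA_TO_DEVICE);  // no-op
 */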
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		sizeof(assoc_rqst->assoc_cmd.hostnqn));
	strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		sizeof(assoc_rqst->assoc_cmd.subnqn));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);

	nvme_fc_rport_put(rport);
}
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
	}
}
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}
/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
					&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}
/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. Handlers will later set to valid length. */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static
void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
		struct fcnvme_ls_rqst_w0 *w0)
{
	dev_info(lport->dev, "RCV %s LS failed: No memory\n",
		(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
			nvmefc_ls_names[w0->ls_cmd] : "");
}
/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                      upon the reception of a NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop), GFP_KERNEL);
	if (!lsop) {
		nvme_fc_rcv_ls_req_err_msg(lport, w0);
		ret = -ENOMEM;
		goto out_put;
	}

	lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL);
	lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL);
	if (!lsop->rqstbuf || !lsop->rspbuf) {
		nvme_fc_rcv_ls_req_err_msg(lport, w0);
		ret = -ENOMEM;
		goto out_free;
	}

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
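
/*
 * Example (illustrative sketch, not part of this driver): an LLDD's
 * unsolicited-receive path handing a NVME LS frame to the transport.
 * The example_* names are placeholders; ls_rsp would be embedded in the
 * LLDD's per-exchange context so the transport can reference the
 * exchange when it later transmits the response.
 */
#if 0
static void example_lldd_recv_nvme_ls(struct example_exchange *xchg,
		void *payload, u32 payload_len)
{
	int ret;

	ret = nvme_fc_rcv_ls_req(xchg->remoteport, &xchg->ls_rsp,
				payload, payload_len);
	if (ret)
		example_lldd_abort_exchange(xchg);	/* per the API contract */
}
#endif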
/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}
/*
 * nvme_fc_io_getuuid - Routine called to get the appid field
 *                      associated with request by the lldd
 * @req: IO request from nvme fc to driver
 * Returns: UUID if there is an appid associated with VM or
 *          NULL if the user/libvirt has not set the appid to VM
 */
char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;

	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
		return NULL;
	return blkcg_get_fc_appid(rq->bio);
}
EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
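
/*
 * Example (illustrative sketch, not part of this driver): an LLDD's
 * fcp_io entrypoint tagging an outgoing I/O with the cgroup-assigned
 * VM appid, if one exists. The example_* names are placeholders.
 */
#if 0
static void example_lldd_queue_fcp(struct nvmefc_fcp_req *fcpreq)
{
	char *uuid = nvme_fc_io_getuuid(fcpreq);

	if (uuid)
		example_lldd_set_appid_tag(fcpreq, uuid);
	/* ... build and send the FCP command ... */
}
#endif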
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get
	 * out of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */
	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}
	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary)
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		goto done;
	}
	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
	return res;
}
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline int
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
	return 0;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{
	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
}
2240 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2242 struct nvme_fc_queue *queue;
2244 queue = &ctrl->queues[idx];
2245 memset(queue, 0, sizeof(*queue));
2248 atomic_set(&queue->csn, 0);
2249 queue->dev = ctrl->dev;
2252 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2254 queue->cmnd_capsule_len = sizeof(struct nvme_command);
2257 * Considered whether we should allocate buffers for all SQEs
2258 * and CQEs and dma map them - mapping their respective entries
2259 * into the request structures (kernel vm addr and dma address)
 * so that the driver could use the buffers/mappings directly.
 * It only makes sense if the LLDD would use them for its
 * messaging api. It's very unlikely most adapter apis would use
 * a native NVMe sqe/cqe. More reasonable if FC-NVME IU payload
 * structures were used instead.
2269 * This routine terminates a queue at the transport level.
2270 * The transport has already ensured that all outstanding ios on
2271 * the queue have been terminated.
2272 * The transport will send a Disconnect LS request to terminate
2273 * the queue's connection. Termination of the admin queue will also
2274 * terminate the association at the target.
2277 nvme_fc_free_queue(struct nvme_fc_queue *queue)
2279 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2282 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2284 * Current implementation never disconnects a single queue.
2285 * It always terminates a whole association. So there is never
2286 * a disconnect(queue) LS sent to the target.
2289 queue->connection_id = 0;
2290 atomic_set(&queue->csn, 0);
2294 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2295 struct nvme_fc_queue *queue, unsigned int qidx)
2297 if (ctrl->lport->ops->delete_queue)
2298 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2299 queue->lldd_handle);
2300 queue->lldd_handle = NULL;
2304 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2308 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2309 nvme_fc_free_queue(&ctrl->queues[i]);
2313 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2314 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2318 queue->lldd_handle = NULL;
2319 if (ctrl->lport->ops->create_queue)
2320 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2321 qidx, qsize, &queue->lldd_handle);
2327 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2329 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2332 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2333 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2337 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2339 struct nvme_fc_queue *queue = &ctrl->queues[1];
2342 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2343 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2352 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2357 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2361 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2362 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2366 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2370 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2377 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2381 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2382 nvme_fc_init_queue(ctrl, i);
2386 nvme_fc_ctrl_free(struct kref *ref)
2388 struct nvme_fc_ctrl *ctrl =
2389 container_of(ref, struct nvme_fc_ctrl, ref);
2390 unsigned long flags;
2392 if (ctrl->ctrl.tagset)
2393 nvme_remove_io_tag_set(&ctrl->ctrl);
2395 /* remove from rport list */
2396 spin_lock_irqsave(&ctrl->rport->lock, flags);
2397 list_del(&ctrl->ctrl_list);
2398 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2400 nvme_unquiesce_admin_queue(&ctrl->ctrl);
2401 nvme_remove_admin_tag_set(&ctrl->ctrl);
2403 kfree(ctrl->queues);
2405 put_device(ctrl->dev);
2406 nvme_fc_rport_put(ctrl->rport);
2408 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
2409 if (ctrl->ctrl.opts)
2410 nvmf_free_options(ctrl->ctrl.opts);
2415 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2417 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2421 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2423 return kref_get_unless_zero(&ctrl->ref);
2427 * All accesses from nvme core layer done - can now free the
2428 * controller. Called after last nvme_put_ctrl() call
2431 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2433 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2435 WARN_ON(nctrl != &ctrl->ctrl);
2437 nvme_fc_ctrl_put(ctrl);
2441 * This routine is used by the transport when it needs to find active
2442 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests; the iterator
 * then invokes this routine on each busy request to kill it.
2446 * As FC allocates FC exchange for each io, the transport must contact
2447 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2448 * After terminating the exchange the LLDD will call the transport's
2449 * normal io done path for the request, but it will have an aborted
2450 * status. The done path will return the io request back to the block
2451 * layer with an error status.
2453 static bool nvme_fc_terminate_exchange(struct request *req, void *data)
2455 struct nvme_ctrl *nctrl = data;
2456 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2457 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2459 op->nreq.flags |= NVME_REQ_CANCELLED;
2460 __nvme_fc_abort_op(ctrl, op);
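	/*
	 * As the busy_tag_iter_fn handed to blk_mq_tagset_busy_iter(),
	 * this callback must return true so that iteration continues
	 * over the remaining busy requests.
	 */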
2465 * This routine runs through all outstanding commands on the association
 * and aborts them. This routine is typically called by the
 * delete_association routine. It is also called due to an error during
 * reconnect. In that scenario, it is most likely a command that
 * initializes the controller, including fabric Connect commands on io
 * queues, that may have timed out or failed; the io must be killed for
 * the connect thread to see the error.
2474 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2479 * if aborting io, the queues are no longer good, mark them
2482 if (ctrl->ctrl.queue_count > 1) {
2483 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2484 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2486 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2489 * If io queues are present, stop them and terminate all outstanding
2490 * ios on them. As FC allocates FC exchange for each io, the
2491 * transport must contact the LLDD to terminate the exchange,
 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
 * to tell us which ios are busy and invoke a transport routine
2494 * to kill them with the LLDD. After terminating the exchange
2495 * the LLDD will call the transport's normal io done path, but it
2496 * will have an aborted status. The done path will return the
2497 * io requests back to the block layer as part of normal completions
2498 * (but with error status).
2500 if (ctrl->ctrl.queue_count > 1) {
2501 nvme_quiesce_io_queues(&ctrl->ctrl);
2502 nvme_sync_io_queues(&ctrl->ctrl);
2503 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2504 nvme_fc_terminate_exchange, &ctrl->ctrl);
2505 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2507 nvme_unquiesce_io_queues(&ctrl->ctrl);
2511 * Other transports, which don't have link-level contexts bound
 * to sqes, would try to gracefully shut down the controller by
 * writing the registers for shutdown and polling (call
 * nvme_disable_ctrl()). Given a bunch of i/o was potentially
 * just aborted and we will wait on those contexts, and given
 * there was no indication of how live the controller is on the
 * link, don't send more io to create more contexts for the
 * shutdown. Let the controller fail via keepalive failure if
 * it's still present.
2523 * clean up the admin queue. Same thing as above.
2525 nvme_quiesce_admin_queue(&ctrl->ctrl);
2526 blk_sync_queue(ctrl->ctrl.admin_q);
2527 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2528 nvme_fc_terminate_exchange, &ctrl->ctrl);
2529 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2531 nvme_unquiesce_admin_queue(&ctrl->ctrl);
2535 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 * if an error (io timeout, etc) occurred while (re)connecting, the
 * remote port requested termination of the association
 * (disconnect_ls), or an error (timeout or abort) occurred on an io
 * while creating the controller, abort any ios on the association
 * and let the create_association error path resolve things.
2544 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2545 __nvme_fc_abort_outstanding_ios(ctrl, true);
2546 set_bit(ASSOC_FAILED, &ctrl->flags);
2547 dev_warn(ctrl->ctrl.device,
2548 "NVME-FC{%d}: transport error during (re)connect\n",
2553 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2554 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2557 dev_warn(ctrl->ctrl.device,
2558 "NVME-FC{%d}: transport association event: %s\n",
2559 ctrl->cnum, errmsg);
2560 dev_warn(ctrl->ctrl.device,
2561 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2563 nvme_reset_ctrl(&ctrl->ctrl);
2566 static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
2568 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2569 struct nvme_fc_ctrl *ctrl = op->ctrl;
2570 u16 qnum = op->queue->qnum;
2571 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2572 struct nvme_command *sqe = &cmdiu->sqe;
2575 * Attempt to abort the offending command. Command completion
2576 * will detect the aborted io and will fail the connection.
2578 dev_info(ctrl->ctrl.device,
2579 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
2581 ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
2582 nvme_fabrics_opcode_str(qnum, sqe),
2583 sqe->common.cdw10, sqe->common.cdw11);
2584 if (__nvme_fc_abort_op(ctrl, op))
2585 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
 * the io abort has been initiated. Restart the request timer;
 * the abort completion will complete the io shortly. This
 * avoids a synchronous wait while the abort finishes.
2592 return BLK_EH_RESET_TIMER;
2596 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2597 struct nvme_fc_fcp_op *op)
2599 struct nvmefc_fcp_req *freq = &op->fcp_req;
2604 if (!blk_rq_nr_phys_segments(rq))
2607 freq->sg_table.sgl = freq->first_sgl;
2608 ret = sg_alloc_table_chained(&freq->sg_table,
2609 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2610 NVME_INLINE_SG_CNT);
2614 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2615 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2616 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2617 op->nents, rq_dma_dir(rq));
2618 if (unlikely(freq->sg_cnt <= 0)) {
2619 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2625 * TODO: blk_integrity_rq(rq) for DIF
2631 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2632 struct nvme_fc_fcp_op *op)
2634 struct nvmefc_fcp_req *freq = &op->fcp_req;
2639 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2642 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2648 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be presented
 * back to the target whenever the host posts something to the
 * corresponding SQ. When an SQE is sent on a SQ, FC effectively
 * considers the SQE, or rather the command contained within the SQE,
 * an io, and assigns a FC exchange to it. The SQE and the associated
 * SQ handle are sent in the initial CMD IU sent on the exchange. All
 * transfers relative to the io occur as part of the exchange. The CQE
 * is the last thing for the io, which is transferred (explicitly or
 * implicitly) with the RSP IU sent on the exchange. After the CQE is
 * received, the FC exchange is terminated and the exchange may be
 * reused for a different io.
 * The transport-to-LLDD api has the transport making a request for a
 * new fcp io to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done
 * routine is called.
2666 * So - while the operation is outstanding to the LLDD, there is a link
2667 * level FC exchange resource that is also outstanding. This must be
2668 * considered in all cleanup operations.
2671 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2672 struct nvme_fc_fcp_op *op, u32 data_len,
2673 enum nvmefc_fcp_datadir io_dir)
2675 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2676 struct nvme_command *sqe = &cmdiu->sqe;
2680 * before attempting to send the io, check to see if we believe
2681 * the target device is present
2683 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2684 return BLK_STS_RESOURCE;
2686 if (!nvme_fc_ctrl_get(ctrl))
2687 return BLK_STS_IOERR;
2689 /* format the FC-NVME CMD IU and fcp_req */
2690 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2691 cmdiu->data_len = cpu_to_be32(data_len);
2693 case NVMEFC_FCP_WRITE:
2694 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2696 case NVMEFC_FCP_READ:
2697 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2699 case NVMEFC_FCP_NODATA:
2703 op->fcp_req.payload_length = data_len;
2704 op->fcp_req.io_dir = io_dir;
2705 op->fcp_req.transferred_length = 0;
2706 op->fcp_req.rcv_rsplen = 0;
2707 op->fcp_req.status = NVME_SC_SUCCESS;
2708 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2711 * validate per fabric rules, set fields mandated by fabric spec
2712 * as well as those by FC-NVME spec.
2714 WARN_ON_ONCE(sqe->common.metadata);
2715 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2718 * format SQE DPTR field per FC-NVME rules:
2719 * type=0x5 Transport SGL Data Block Descriptor
2720 * subtype=0xA Transport-specific value
2722 * length=length of the data series
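	 *
	 * e.g. the resulting descriptor type byte below is 0x5A
	 * ((0x5 << 4) | 0xA), and the address field is left 0 as the
	 * data is implicit to the FC exchange.
	 */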
2724 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2725 NVME_SGL_FMT_TRANSPORT_A;
2726 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2727 sqe->rw.dptr.sgl.addr = 0;
2729 if (!(op->flags & FCOP_FLAGS_AEN)) {
2730 ret = nvme_fc_map_data(ctrl, op->rq, op);
2732 nvme_cleanup_cmd(op->rq);
2733 nvme_fc_ctrl_put(ctrl);
2734 if (ret == -ENOMEM || ret == -EAGAIN)
2735 return BLK_STS_RESOURCE;
2736 return BLK_STS_IOERR;
2740 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2741 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2743 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2745 if (!(op->flags & FCOP_FLAGS_AEN))
2746 nvme_start_request(op->rq);
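	/*
	 * queue->csn was (re)set to 0 at queue init, so the first
	 * command on a freshly connected queue carries CSN 1; the
	 * value is carried big-endian in the CMD IU.
	 */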
2748 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2749 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2750 &ctrl->rport->remoteport,
2751 queue->lldd_handle, &op->fcp_req);
 * If the lld fails to send the command, is there an issue with
 * the csn value? If the command that fails is the Connect,
2757 * no - as the connection won't be live. If it is a command
2758 * post-connect, it's possible a gap in csn may be created.
2759 * Does this matter? As Linux initiators don't send fused
2760 * commands, no. The gap would exist, but as there's nothing
2761 * that depends on csn order to be delivered on the target
2762 * side, it shouldn't hurt. It would be difficult for a
2763 * target to even detect the csn gap as it has no idea when the
2764 * cmd with the csn was supposed to arrive.
2766 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2767 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2769 if (!(op->flags & FCOP_FLAGS_AEN)) {
2770 nvme_fc_unmap_data(ctrl, op->rq, op);
2771 nvme_cleanup_cmd(op->rq);
2774 nvme_fc_ctrl_put(ctrl);
2776 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2778 return BLK_STS_IOERR;
2780 return BLK_STS_RESOURCE;
2787 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2788 const struct blk_mq_queue_data *bd)
2790 struct nvme_ns *ns = hctx->queue->queuedata;
2791 struct nvme_fc_queue *queue = hctx->driver_data;
2792 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2793 struct request *rq = bd->rq;
2794 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2795 enum nvmefc_fcp_datadir io_dir;
2796 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2800 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2801 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2802 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2804 ret = nvme_setup_cmd(ns, rq);
2809 * nvme core doesn't quite treat the rq opaquely. Commands such
2810 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2811 * there is no actual payload to be transferred.
2812 * To get it right, key data transmission on there being 1 or
 * more physical segments in the sg list. If there are no
 * physical segments, there is no payload.
2816 if (blk_rq_nr_phys_segments(rq)) {
2817 data_len = blk_rq_payload_bytes(rq);
2818 io_dir = ((rq_data_dir(rq) == WRITE) ?
2819 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2822 io_dir = NVMEFC_FCP_NODATA;
2826 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2830 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2832 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2833 struct nvme_fc_fcp_op *aen_op;
2836 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2839 aen_op = &ctrl->aen_ops[0];
2841 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2844 dev_err(ctrl->ctrl.device,
2845 "failed async event work\n");
2849 nvme_fc_complete_rq(struct request *rq)
2851 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2852 struct nvme_fc_ctrl *ctrl = op->ctrl;
2854 atomic_set(&op->state, FCPOP_STATE_IDLE);
2855 op->flags &= ~FCOP_FLAGS_TERMIO;
2857 nvme_fc_unmap_data(ctrl, rq, op);
2858 nvme_complete_rq(rq);
2859 nvme_fc_ctrl_put(ctrl);
2862 static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
2864 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
2867 for (i = 0; i < set->nr_maps; i++) {
2868 struct blk_mq_queue_map *map = &set->map[i];
2870 if (!map->nr_queues) {
2871 WARN_ON(i == HCTX_TYPE_DEFAULT);
2875 /* Call LLDD map queue functionality if defined */
2876 if (ctrl->lport->ops->map_queues)
2877 ctrl->lport->ops->map_queues(&ctrl->lport->localport,
2880 blk_mq_map_queues(map);
2884 static const struct blk_mq_ops nvme_fc_mq_ops = {
2885 .queue_rq = nvme_fc_queue_rq,
2886 .complete = nvme_fc_complete_rq,
2887 .init_request = nvme_fc_init_request,
2888 .exit_request = nvme_fc_exit_request,
2889 .init_hctx = nvme_fc_init_hctx,
2890 .timeout = nvme_fc_timeout,
2891 .map_queues = nvme_fc_map_queues,
2895 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2897 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2898 unsigned int nr_io_queues;
2901 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2902 ctrl->lport->ops->max_hw_queues);
2903 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2905 dev_info(ctrl->ctrl.device,
2906 "set_queue_count failed: %d\n", ret);
2910 ctrl->ctrl.queue_count = nr_io_queues + 1;
2914 nvme_fc_init_io_queues(ctrl);
2916 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
2918 struct_size_t(struct nvme_fcp_op_w_sgl, priv,
2919 ctrl->lport->ops->fcprqst_priv_sz));
2923 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2925 goto out_cleanup_tagset;
2927 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2929 goto out_delete_hw_queues;
2931 ctrl->ioq_live = true;
2935 out_delete_hw_queues:
2936 nvme_fc_delete_hw_io_queues(ctrl);
2938 nvme_remove_io_tag_set(&ctrl->ctrl);
2939 nvme_fc_free_io_queues(ctrl);
2941 /* force put free routine to ignore io queues */
2942 ctrl->ctrl.tagset = NULL;
2948 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2950 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2951 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2952 unsigned int nr_io_queues;
2955 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2956 ctrl->lport->ops->max_hw_queues);
2957 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2959 dev_info(ctrl->ctrl.device,
2960 "set_queue_count failed: %d\n", ret);
2964 if (!nr_io_queues && prior_ioq_cnt) {
2965 dev_info(ctrl->ctrl.device,
2966 "Fail Reconnect: At least 1 io queue "
2967 "required (was %d)\n", prior_ioq_cnt);
2971 ctrl->ctrl.queue_count = nr_io_queues + 1;
2972 /* check for io queues existing */
2973 if (ctrl->ctrl.queue_count == 1)
2976 if (prior_ioq_cnt != nr_io_queues) {
2977 dev_info(ctrl->ctrl.device,
2978 "reconnect: revising io queue count from %d to %d\n",
2979 prior_ioq_cnt, nr_io_queues);
2980 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2983 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2985 goto out_free_io_queues;
2987 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2989 goto out_delete_hw_queues;
2993 out_delete_hw_queues:
2994 nvme_fc_delete_hw_io_queues(ctrl);
2996 nvme_fc_free_io_queues(ctrl);
3001 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
3003 struct nvme_fc_lport *lport = rport->lport;
3005 atomic_inc(&lport->act_rport_cnt);
3009 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
3011 struct nvme_fc_lport *lport = rport->lport;
3014 cnt = atomic_dec_return(&lport->act_rport_cnt);
3015 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
3016 lport->ops->localport_delete(&lport->localport);
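/*
 * The active reference counts gate the LLDD teardown callbacks: once
 * a localport/remoteport has been marked FC_OBJSTATE_DELETED, its
 * ops->localport_delete()/remoteport_delete() handler is invoked only
 * when the corresponding active count drops to zero.
 */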
3020 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3022 struct nvme_fc_rport *rport = ctrl->rport;
3025 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3028 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
3030 nvme_fc_rport_active_on_lport(rport);
3036 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3038 struct nvme_fc_rport *rport = ctrl->rport;
3039 struct nvme_fc_lport *lport = rport->lport;
3042 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3044 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
3046 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
3047 lport->ops->remoteport_delete(&rport->remoteport);
3048 nvme_fc_rport_inactive_on_lport(rport);
3055 * This routine restarts the controller on the host side, and
3056 * on the link side, recreates the controller association.
3059 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3061 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3062 struct nvmefc_ls_rcv_op *disls = NULL;
3063 unsigned long flags;
3067 ++ctrl->ctrl.nr_reconnects;
3069 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3072 if (nvme_fc_ctlr_active_on_rport(ctrl))
3075 dev_info(ctrl->ctrl.device,
3076 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
3077 " rport wwpn 0x%016llx: NQN \"%s\"\n",
3078 ctrl->cnum, ctrl->lport->localport.port_name,
3079 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3081 clear_bit(ASSOC_FAILED, &ctrl->flags);
3084 * Create the admin queue
3087 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3090 goto out_free_queue;
3092 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3093 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3095 goto out_delete_hw_queue;
3097 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3099 goto out_disconnect_admin_queue;
3101 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3104 * Check controller capabilities
 * TODO: add code to check if ctrl attributes changed from
 * prior connection values
3110 ret = nvme_enable_ctrl(&ctrl->ctrl);
3111 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
3114 goto out_disconnect_admin_queue;
3116 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3117 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3120 nvme_unquiesce_admin_queue(&ctrl->ctrl);
3122 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
3124 goto out_disconnect_admin_queue;
3125 if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
3127 goto out_stop_keep_alive;
3131 /* FC-NVME does not have other data in the capsule */
3132 if (ctrl->ctrl.icdoff) {
3133 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3135 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3136 goto out_stop_keep_alive;
3139 /* FC-NVME supports normal SGL Data Block Descriptors */
3140 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
3141 dev_err(ctrl->ctrl.device,
3142 "Mandatory sgls are not supported!\n");
3143 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3144 goto out_stop_keep_alive;
3147 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3148 /* warn if maxcmd is lower than queue_size */
3149 dev_warn(ctrl->ctrl.device,
3150 "queue_size %zu > ctrl maxcmd %u, reducing "
3152 opts->queue_size, ctrl->ctrl.maxcmd);
3153 opts->queue_size = ctrl->ctrl.maxcmd;
3154 ctrl->ctrl.sqsize = opts->queue_size - 1;
3157 ret = nvme_fc_init_aen_ops(ctrl);
3159 goto out_term_aen_ops;
3162 * Create the io queues
3165 if (ctrl->ctrl.queue_count > 1) {
3166 if (!ctrl->ioq_live)
3167 ret = nvme_fc_create_io_queues(ctrl);
3169 ret = nvme_fc_recreate_io_queues(ctrl);
3171 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
3174 goto out_term_aen_ops;
3176 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3178 ctrl->ctrl.nr_reconnects = 0;
3181 nvme_start_ctrl(&ctrl->ctrl);
3183 return 0; /* Success */
3186 nvme_fc_term_aen_ops(ctrl);
3187 out_stop_keep_alive:
3188 nvme_stop_keep_alive(&ctrl->ctrl);
3189 out_disconnect_admin_queue:
3190 dev_warn(ctrl->ctrl.device,
3191 "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
3192 ctrl->cnum, ctrl->association_id, ret);
3193 /* send a Disconnect(association) LS to fc-nvme target */
3194 nvme_fc_xmt_disconnect_assoc(ctrl);
3195 spin_lock_irqsave(&ctrl->lock, flags);
3196 ctrl->association_id = 0;
3197 disls = ctrl->rcv_disconn;
3198 ctrl->rcv_disconn = NULL;
3199 spin_unlock_irqrestore(&ctrl->lock, flags);
3201 nvme_fc_xmt_ls_rsp(disls);
3202 out_delete_hw_queue:
3203 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3205 nvme_fc_free_queue(&ctrl->queues[0]);
3206 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3207 nvme_fc_ctlr_inactive_on_rport(ctrl);
3214 * This routine stops operation of the controller on the host side.
3215 * On the host os stack side: Admin and IO queues are stopped,
3216 * outstanding ios on them terminated via FC ABTS.
3217 * On the link side: the association is terminated.
3220 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3222 struct nvmefc_ls_rcv_op *disls = NULL;
3223 unsigned long flags;
3225 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3228 spin_lock_irqsave(&ctrl->lock, flags);
3229 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3231 spin_unlock_irqrestore(&ctrl->lock, flags);
3233 __nvme_fc_abort_outstanding_ios(ctrl, false);
3235 /* kill the aens as they are a separate path */
3236 nvme_fc_abort_aen_ops(ctrl);
3238 /* wait for all io that had to be aborted */
3239 spin_lock_irq(&ctrl->lock);
3240 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3241 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3242 spin_unlock_irq(&ctrl->lock);
3244 nvme_fc_term_aen_ops(ctrl);
3247 * send a Disconnect(association) LS to fc-nvme target
3248 * Note: could have been sent at top of process, but
3249 * cleaner on link traffic if after the aborts complete.
3250 * Note: if association doesn't exist, association_id will be 0
3252 if (ctrl->association_id)
3253 nvme_fc_xmt_disconnect_assoc(ctrl);
3255 spin_lock_irqsave(&ctrl->lock, flags);
3256 ctrl->association_id = 0;
3257 disls = ctrl->rcv_disconn;
3258 ctrl->rcv_disconn = NULL;
3259 spin_unlock_irqrestore(&ctrl->lock, flags);
 * if a Disconnect Request was waiting for a response, send it
 * now that all ABTS's have been issued (and are complete).
3265 nvme_fc_xmt_ls_rsp(disls);
3267 if (ctrl->ctrl.tagset) {
3268 nvme_fc_delete_hw_io_queues(ctrl);
3269 nvme_fc_free_io_queues(ctrl);
3272 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3273 nvme_fc_free_queue(&ctrl->queues[0]);
3275 /* re-enable the admin_q so anything new can fast fail */
3276 nvme_unquiesce_admin_queue(&ctrl->ctrl);
3278 /* resume the io queues so that things will fast fail */
3279 nvme_unquiesce_io_queues(&ctrl->ctrl);
3281 nvme_fc_ctlr_inactive_on_rport(ctrl);
3285 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3287 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3289 cancel_work_sync(&ctrl->ioerr_work);
3290 cancel_delayed_work_sync(&ctrl->connect_work);
3292 * kill the association on the link side. this will block
3293 * waiting for io to terminate
3295 nvme_fc_delete_association(ctrl);
3299 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3301 struct nvme_fc_rport *rport = ctrl->rport;
3302 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3303 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3306 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
3309 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3310 dev_info(ctrl->ctrl.device,
3311 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3312 ctrl->cnum, status);
3313 if (status > 0 && (status & NVME_SC_DNR))
3315 } else if (time_after_eq(jiffies, rport->dev_loss_end))
3318 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3319 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3320 dev_info(ctrl->ctrl.device,
3321 "NVME-FC{%d}: Reconnect attempt in %ld "
3323 ctrl->cnum, recon_delay / HZ);
3324 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3325 recon_delay = rport->dev_loss_end - jiffies;
3327 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3329 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3330 if (status > 0 && (status & NVME_SC_DNR))
3331 dev_warn(ctrl->ctrl.device,
3332 "NVME-FC{%d}: reconnect failure\n",
3335 dev_warn(ctrl->ctrl.device,
3336 "NVME-FC{%d}: Max reconnect attempts "
3338 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3340 dev_warn(ctrl->ctrl.device,
3341 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3342 "while waiting for remoteport connectivity.\n",
3343 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3344 (ctrl->ctrl.opts->max_reconnects *
3345 ctrl->ctrl.opts->reconnect_delay)));
3346 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3351 nvme_fc_reset_ctrl_work(struct work_struct *work)
3353 struct nvme_fc_ctrl *ctrl =
3354 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3356 nvme_stop_ctrl(&ctrl->ctrl);
	/* will block while waiting for io to terminate */
3359 nvme_fc_delete_association(ctrl);
3361 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3362 dev_err(ctrl->ctrl.device,
3363 "NVME-FC{%d}: error_recovery: Couldn't change state "
3364 "to CONNECTING\n", ctrl->cnum);
3366 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3367 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3368 dev_err(ctrl->ctrl.device,
3369 "NVME-FC{%d}: failed to schedule connect "
3370 "after reset\n", ctrl->cnum);
3372 flush_delayed_work(&ctrl->connect_work);
3375 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3380 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3382 .module = THIS_MODULE,
3383 .flags = NVME_F_FABRICS,
3384 .reg_read32 = nvmf_reg_read32,
3385 .reg_read64 = nvmf_reg_read64,
3386 .reg_write32 = nvmf_reg_write32,
3387 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3388 .submit_async_event = nvme_fc_submit_async_event,
3389 .delete_ctrl = nvme_fc_delete_ctrl,
3390 .get_address = nvmf_get_address,
3394 nvme_fc_connect_ctrl_work(struct work_struct *work)
3398 struct nvme_fc_ctrl *ctrl =
3399 container_of(to_delayed_work(work),
3400 struct nvme_fc_ctrl, connect_work);
3402 ret = nvme_fc_create_association(ctrl);
3404 nvme_fc_reconnect_or_delete(ctrl, ret);
3406 dev_info(ctrl->ctrl.device,
3407 "NVME-FC{%d}: controller connect complete\n",
3412 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3413 .queue_rq = nvme_fc_queue_rq,
3414 .complete = nvme_fc_complete_rq,
3415 .init_request = nvme_fc_init_request,
3416 .exit_request = nvme_fc_exit_request,
3417 .init_hctx = nvme_fc_init_admin_hctx,
3418 .timeout = nvme_fc_timeout,
3423 * Fails a controller request if it matches an existing controller
3424 * (association) with the same tuple:
3425 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3427 * The ports don't need to be compared as they are intrinsically
3428 * already matched by the port pointers supplied.
3431 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3432 struct nvmf_ctrl_options *opts)
3434 struct nvme_fc_ctrl *ctrl;
3435 unsigned long flags;
3438 spin_lock_irqsave(&rport->lock, flags);
3439 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3440 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3444 spin_unlock_irqrestore(&rport->lock, flags);
3449 static struct nvme_ctrl *
3450 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3451 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3453 struct nvme_fc_ctrl *ctrl;
3454 unsigned long flags;
3455 int ret, idx, ctrl_loss_tmo;
3457 if (!(rport->remoteport.port_role &
3458 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3463 if (!opts->duplicate_connect &&
3464 nvme_fc_existing_controller(rport, opts)) {
3469 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3475 idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
3482 * if ctrl_loss_tmo is being enforced and the default reconnect delay
3483 * is being used, change to a shorter reconnect delay for FC.
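	 *
	 * Worked example, assuming the core defaults of a 10s
	 * reconnect_delay and a 600s ctrl_loss_tmo: max_reconnects
	 * arrives as 60; the delay is shortened to 2s and
	 * max_reconnects becomes DIV_ROUND_UP(600, 2) = 300, leaving
	 * the effective ctrl_loss_tmo unchanged.
	 */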
3485 if (opts->max_reconnects != -1 &&
3486 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
3487 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
3488 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
3489 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
3490 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3491 opts->reconnect_delay);
3494 ctrl->ctrl.opts = opts;
3495 ctrl->ctrl.nr_reconnects = 0;
3496 INIT_LIST_HEAD(&ctrl->ctrl_list);
3497 ctrl->lport = lport;
3498 ctrl->rport = rport;
3499 ctrl->dev = lport->dev;
3501 ctrl->ioq_live = false;
3502 init_waitqueue_head(&ctrl->ioabort_wait);
3504 get_device(ctrl->dev);
3505 kref_init(&ctrl->ref);
3507 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3508 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3509 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3510 spin_lock_init(&ctrl->lock);
3512 /* io queue count */
3513 ctrl->ctrl.queue_count = min_t(unsigned int,
3515 lport->ops->max_hw_queues);
3516 ctrl->ctrl.queue_count++; /* +1 for admin queue */
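	/* sqsize is a 0's-based value per the NVMe spec, hence the -1 */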
3518 ctrl->ctrl.sqsize = opts->queue_size - 1;
3519 ctrl->ctrl.kato = opts->kato;
3520 ctrl->ctrl.cntlid = 0xffff;
3523 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3524 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3528 nvme_fc_init_queue(ctrl, 0);
3531 * Would have been nice to init io queues tag set as well.
3532 * However, we require interaction from the controller
3533 * for max io queue count before we can do so.
3534 * Defer this to the connect path.
3537 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3539 goto out_free_queues;
3541 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3543 /* at this point, teardown path changes to ref counting on nvme ctrl */
3545 ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
3546 &nvme_fc_admin_mq_ops,
3547 struct_size_t(struct nvme_fcp_op_w_sgl, priv,
3548 ctrl->lport->ops->fcprqst_priv_sz));
3552 spin_lock_irqsave(&rport->lock, flags);
3553 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3554 spin_unlock_irqrestore(&rport->lock, flags);
3556 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3557 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3558 dev_err(ctrl->ctrl.device,
3559 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3563 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3564 dev_err(ctrl->ctrl.device,
3565 "NVME-FC{%d}: failed to schedule initial connect\n",
3570 flush_delayed_work(&ctrl->connect_work);
3572 dev_info(ctrl->ctrl.device,
3573 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3574 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
3579 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3580 cancel_work_sync(&ctrl->ioerr_work);
3581 cancel_work_sync(&ctrl->ctrl.reset_work);
3582 cancel_delayed_work_sync(&ctrl->connect_work);
3584 ctrl->ctrl.opts = NULL;
3586 /* initiate nvme ctrl ref counting teardown */
3587 nvme_uninit_ctrl(&ctrl->ctrl);
3589 /* Remove core ctrl ref. */
3590 nvme_put_ctrl(&ctrl->ctrl);
3592 /* as we're past the point where we transition to the ref
3593 * counting teardown path, if we return a bad pointer here,
3594 * the calling routine, thinking it's prior to the
3595 * transition, will do an rport put. Since the teardown
 * path also does a rport put, we do an extra get here so that
 * proper order/teardown happens.
3599 nvme_fc_rport_get(rport);
3601 return ERR_PTR(-EIO);
3604 kfree(ctrl->queues);
3606 put_device(ctrl->dev);
3607 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
	/* exit via here doesn't follow ctrl ref points */
3612 return ERR_PTR(ret);
3616 struct nvmet_fc_traddr {
3622 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3626 if (match_u64(sstr, &token64))
 * This routine validates and extracts the WWNs from the TRADDR string.
3635 * As kernel parsers need the 0x to determine number base, universally
3636 * build string to parse with 0x prefix before parsing name strings.
3639 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3641 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3642 substring_t wwn = { name, &name[sizeof(name)-1] };
3643 int nnoffset, pnoffset;
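	/*
	 * Two formats are accepted, e.g. (WWNs hypothetical):
	 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"
	 *   "nn-20000090fa942779:pn-10000090fa942779"
	 */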
3645 /* validate if string is one of the 2 allowed formats */
3646 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3647 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3648 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3649 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3650 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3651 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3652 NVME_FC_TRADDR_OXNNLEN;
3653 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3654 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3655 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3656 "pn-", NVME_FC_TRADDR_NNLEN))) {
3657 nnoffset = NVME_FC_TRADDR_NNLEN;
3658 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3664 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3666 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3667 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3670 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3671 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3677 pr_warn("%s: bad traddr string\n", __func__);
3681 static struct nvme_ctrl *
3682 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3684 struct nvme_fc_lport *lport;
3685 struct nvme_fc_rport *rport;
3686 struct nvme_ctrl *ctrl;
3687 struct nvmet_fc_traddr laddr = { 0L, 0L };
3688 struct nvmet_fc_traddr raddr = { 0L, 0L };
3689 unsigned long flags;
3692 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3693 if (ret || !raddr.nn || !raddr.pn)
3694 return ERR_PTR(-EINVAL);
3696 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3697 if (ret || !laddr.nn || !laddr.pn)
3698 return ERR_PTR(-EINVAL);
3700 /* find the host and remote ports to connect together */
3701 spin_lock_irqsave(&nvme_fc_lock, flags);
3702 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3703 if (lport->localport.node_name != laddr.nn ||
3704 lport->localport.port_name != laddr.pn ||
3705 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3708 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3709 if (rport->remoteport.node_name != raddr.nn ||
3710 rport->remoteport.port_name != raddr.pn ||
3711 rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
			/* if we fail to get a reference, fall through. Will error */
3715 if (!nvme_fc_rport_get(rport))
3718 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3720 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3722 nvme_fc_rport_put(rport);
3726 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3728 pr_warn("%s: %s - %s combination not found\n",
3729 __func__, opts->traddr, opts->host_traddr);
3730 return ERR_PTR(-ENOENT);
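/*
 * A usage sketch (WWNs hypothetical) of how a controller is typically
 * created through this entry point via nvme-cli:
 *
 *   nvme connect -t fc -n <subsys NQN> \
 *	-a nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *	-w nn-0x20000090fa942775:pn-0x10000090fa942775
 *
 * traddr (-a) names the remote port and host_traddr (-w) the local
 * port; both must already be registered and FC_OBJSTATE_ONLINE for
 * the lookup above to find them.
 */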
3734 static struct nvmf_transport_ops nvme_fc_transport = {
3736 .module = THIS_MODULE,
3737 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3738 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3739 .create_ctrl = nvme_fc_create_ctrl,
/* Arbitrary max for successive failures. With lots of subsystems it could be high */
3743 #define DISCOVERY_MAX_FAIL 20
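/*
 * Sysfs hook to (re)signal discovery on all registered rports.
 * A usage sketch, assuming the class device below is named after the
 * fc_udev_device variable:
 *
 *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 */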
3745 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3746 struct device_attribute *attr, const char *buf, size_t count)
3748 unsigned long flags;
3749 LIST_HEAD(local_disc_list);
3750 struct nvme_fc_lport *lport;
3751 struct nvme_fc_rport *rport;
3754 spin_lock_irqsave(&nvme_fc_lock, flags);
3756 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3757 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3758 if (!nvme_fc_lport_get(lport))
3760 if (!nvme_fc_rport_get(rport)) {
3762 * This is a temporary condition. Upon restart
3763 * this rport will be gone from the list.
3765 * Revert the lport put and retry. Anything
3766 * added to the list already will be skipped (as
3767 * they are no longer list_empty). Loops should
3768 * resume at rports that were not yet seen.
3770 nvme_fc_lport_put(lport);
3772 if (failcnt++ < DISCOVERY_MAX_FAIL)
3775 pr_err("nvme_discovery: too many reference "
3777 goto process_local_list;
3779 if (list_empty(&rport->disc_list))
3780 list_add_tail(&rport->disc_list,
3786 while (!list_empty(&local_disc_list)) {
3787 rport = list_first_entry(&local_disc_list,
3788 struct nvme_fc_rport, disc_list);
3789 list_del_init(&rport->disc_list);
3790 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3792 lport = rport->lport;
3793 /* signal discovery. Won't hurt if it repeats */
3794 nvme_fc_signal_discovery_scan(lport, rport);
3795 nvme_fc_rport_put(rport);
3796 nvme_fc_lport_put(lport);
3798 spin_lock_irqsave(&nvme_fc_lock, flags);
3800 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3805 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
3807 #ifdef CONFIG_BLK_CGROUP_FC_APPID
3808 /* Parse the cgroup id from a buf and return the length of cgrpid */
3809 static int fc_parse_cgrpid(const char *buf, u64 *id)
3814 memset(cgrp_id, 0x0, sizeof(cgrp_id));
3815 for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
3816 if (buf[cgrpid_len] != ':')
3817 cgrp_id[cgrpid_len] = buf[cgrpid_len];
3825 if (kstrtou64(cgrp_id, 16, id) < 0)
3831 * Parse and update the appid in the blkcg associated with the cgroupid.
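 *
 * The expected format is "<cgroupid>:<appid>", with the cgroup id in
 * hex (up to 16 digits) and the appid at most FC_APPID_LEN bytes.
 * A usage sketch (values hypothetical, path assuming the same
 * fc_udev_device class device as above):
 *
 *   echo "1234ABCD:dbase01" > /sys/class/fc/fc_udev_device/appid_store
 */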
3833 static ssize_t fc_appid_store(struct device *dev,
3834 struct device_attribute *attr, const char *buf, size_t count)
3836 size_t orig_count = count;
3840 char app_id[FC_APPID_LEN];
3843 if (buf[count-1] == '\n')
3846 if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
3849 cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
3852 appid_len = count - cgrpid_len - 1;
3853 if (appid_len > FC_APPID_LEN)
3856 memset(app_id, 0x0, sizeof(app_id));
3857 memcpy(app_id, &buf[cgrpid_len+1], appid_len);
3858 ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
3863 static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
3864 #endif /* CONFIG_BLK_CGROUP_FC_APPID */
3866 static struct attribute *nvme_fc_attrs[] = {
3867 &dev_attr_nvme_discovery.attr,
3868 #ifdef CONFIG_BLK_CGROUP_FC_APPID
3869 &dev_attr_appid_store.attr,
3874 static const struct attribute_group nvme_fc_attr_group = {
3875 .attrs = nvme_fc_attrs,
3878 static const struct attribute_group *nvme_fc_attr_groups[] = {
3879 &nvme_fc_attr_group,
3883 static struct class fc_class = {
3885 .dev_groups = nvme_fc_attr_groups,
3888 static int __init nvme_fc_init_module(void)
3894 * It is expected that in the future the kernel will combine
3895 * the FC-isms that are currently under scsi and now being
3896 * added to by NVME into a new standalone FC class. The SCSI
 * and NVME protocols and their devices would be under this new
 * FC class.
3900 * As we need something to post FC-specific udev events to,
3901 * specifically for nvme probe events, start by creating the
3902 * new device class. When the new standalone FC class is
3903 * put in place, this code will move to a more generic
3904 * location for the class.
3906 ret = class_register(&fc_class);
3908 pr_err("couldn't register class fc\n");
3913 * Create a device for the FC-centric udev events
3915 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3917 if (IS_ERR(fc_udev_device)) {
3918 pr_err("couldn't create fc_udev device!\n");
3919 ret = PTR_ERR(fc_udev_device);
3920 goto out_destroy_class;
3923 ret = nvmf_register_transport(&nvme_fc_transport);
3925 goto out_destroy_device;
3930 device_destroy(&fc_class, MKDEV(0, 0));
3932 class_unregister(&fc_class);
3938 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3940 struct nvme_fc_ctrl *ctrl;
3942 spin_lock(&rport->lock);
3943 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3944 dev_warn(ctrl->ctrl.device,
3945 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3947 nvme_delete_ctrl(&ctrl->ctrl);
3949 spin_unlock(&rport->lock);
3952 static void __exit nvme_fc_exit_module(void)
3954 struct nvme_fc_lport *lport;
3955 struct nvme_fc_rport *rport;
3956 unsigned long flags;
3958 spin_lock_irqsave(&nvme_fc_lock, flags);
3959 list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
3960 list_for_each_entry(rport, &lport->endp_list, endp_list)
3961 nvme_fc_delete_controllers(rport);
3962 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3963 flush_workqueue(nvme_delete_wq);
3965 nvmf_unregister_transport(&nvme_fc_transport);
3967 device_destroy(&fc_class, MKDEV(0, 0));
3968 class_unregister(&fc_class);
3971 module_init(nvme_fc_init_module);
3972 module_exit(nvme_fc_exit_module);
3974 MODULE_DESCRIPTION("NVMe host FC transport driver");
3975 MODULE_LICENSE("GPL v2");