/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

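/*
 * Register an fc_port with the FC-NVMe transport as a remote port.
 * Registration is skipped (returning 0) when host NVMe is disabled,
 * the port did not advertise an NVMe target or discovery role in its
 * PRLI service parameters, or it is already registered.
 */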
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
		return;

	atomic_dec(&sp->ref_count);
	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}

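/*
 * SRB completion callback for an NVMe FCP command: report the received
 * response length on success, zero the transfer counts on error, then
 * complete the request back to the FC-NVMe transport and release the
 * SRB to its queue pair.
 */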
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
		return;

	atomic_dec(&sp->ref_count);
	if (res == QLA_SUCCESS) {
		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
	}
	fd->status = 0;
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

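/*
 * Deferred abort handler: issue an ISP abort for the SRB stashed in
 * nvme_private->sp, unless the firmware is already stopped or the
 * host is shutting down (in which case the SRB is completed directly).
 */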
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && (fcport && fcport->deleted))
		return;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
		    __func__, sp, sp->type, atomic_read(&sp->ref_count));
		sp->done(sp, 0);
		return;
	}

	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
		return;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}

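/* Transport entry point: abort an outstanding LS request via abort_work. */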
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

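/* Runs from ls_work to call the transport's LS done() in process context. */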
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

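/*
 * Transport entry point: build an SRB for an NVMe LS request, map the
 * request payload for DMA, and hand the IOCB to the firmware through
 * qla2x00_start_sp().
 */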
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

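/* Transport entry point: abort an outstanding FCP command via abort_work. */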
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

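/*
 * Build and queue a Command Type NVME IOCB on the qpair's request
 * queue: reserve an outstanding-command handle and ring space, fill in
 * the command/response IU descriptors, chain Continuation Type 1 IOCBs
 * for any additional data segments, then notify the chip by updating
 * the request-queue in-pointer.
 */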
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        index;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t        avail_dsds;
	struct dsd64	*cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t        rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

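/*
 * Transport entry point: queue an NVMe FCP command. Returns -EBUSY to
 * stall the transport's I/O queue while the remote port is resetting
 * or when no SRB is available, so the core retries rather than failing
 * the I/O.
 */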
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still sending
	 * IO's return busy back to stall the IO Q. This happens when the
	 * link goes away and fw hasn't notified us yet, but IO's are being
	 * returned. If the dev comes back quickly we won't exhaust the IO
	 * retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}

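/* Transport callback: local port deletion finished; release the waiter. */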
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

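/*
 * Transport callback invoked once the remote port's references are
 * gone: unlink the rport from the vha's list, signal the waiter in
 * qla_nvme_unregister_remote_port(), and schedule session teardown
 * unless the driver is unloading.
 */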
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

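/*
 * Worker for fcport->nvme_del_work: drop the remote port's devloss
 * timeout to zero, unregister it from the transport, and wait for
 * qla_nvme_remoteport_delete() to signal completion.
 */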
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			nvme_fc_set_remoteport_devloss
				(fcport->nvme_remote_port, 0);
			init_completion(&fcport->nvme_del_done);
			if (nvme_fc_unregister_remoteport
			    (fcport->nvme_remote_port))
				ql_log(ql_log_info, fcport->vha, 0x2114,
				    "%s: Failed to unregister nvme_remote_port\n",
				    __func__);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}

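/*
 * Host teardown: unregister the local port and wait for the
 * transport's localport_delete callback before returning.
 */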
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

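/*
 * Register this HBA as an FC-NVMe local port. max_hw_queues is clamped
 * to ha->max_req_queues - 2, leaving two request queues for non-NVMe
 * use.
 */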
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}