scsi: lpfc: Enable SCSI and NVME fc4s by default
[linux-2.6-block.git] / drivers / scsi / lpfc / lpfc_nvme.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;

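/*
 * The WQE templates above are initialized once via lpfc_nvme_cmd_template().
 * On the IO fast path the submit code copies the fixed words from the
 * matching template (words 7-11 for iread/iwrite, words 4-11 for icmnd)
 * and then fills in only the per-IO variable words.
 */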
/* Setup WQE templates for NVME IOs */
void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

/**
 * lpfc_nvme_create_queue - Bind an NVME queue index to driver resources
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU/queue index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Requested queue size (not used by this driver).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0.  After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "hdw_queue %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete an NVME queue handle
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: The queue index that was bound by lpfc_nvme_create_queue.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * that bound the @qidx to its internal IO queues.
 *
 * Return value : None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport %p delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport %p\n",
			remoteport);
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}

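/**
 * lpfc_nvme_cmpl_gen_req - Completion handler for an NVME LS GEN_REQ WQE
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to the command WQE that completed
 * @wcqe: Pointer to the work completion queue entry
 *
 * Releases the BPL wrapper saved at issue time, drops the ndlp reference,
 * invokes the transport's done() callback with the completion status, and
 * frees the iocbq.
 **/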
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
			 "lsreg:%p bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

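/**
 * lpfc_nvme_gen_req - Build and issue a GEN_REQUEST64 WQE for an NVME LS
 * @vport: The virtual port issuing the request
 * @bmp: DMA buffer holding the BPL that describes the LS payloads
 * @inp: DMA buffer for the request payload
 * @pnvme_lsreq: The transport's LS request
 * @cmpl: Completion handler to invoke when the WQE completes
 * @ndlp: The remote node the request is addressed to
 * @num_entry: Number of BDEs in the BPL
 * @tmo: Timeout in seconds (0 selects the 3 * R_A_TOV default)
 * @retry: Retry count saved in the WQE context
 *
 * Return value :
 *   0 - Success
 *   1 - Could not allocate or issue the WQE
 **/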
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport receiving the LS request
 * @pnvme_lsreq: Pointer to the transport's LS request to issue
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero - Validation, allocation, or WQE submission failure
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* There are two DMA buffers described in the request; in practice
	 * there is one buffer, and the second address is just the start
	 * address plus the command size.  Before calling lpfc_nvme_gen_req
	 * these buffers need to be wrapped in lpfc_dmabuf structs.  When
	 * freeing, only the wrappers are freed because the nvme layer owns
	 * the data buffers.  The driver does not break these packets open
	 * or inspect the response payload; it only cares that a response
	 * arrived.  All of the protocol handling happens in the nvme-fc
	 * layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	/* Need the ndlp.  It is stored in the driver's rport. */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 Remoteport %p, rport has invalid ndlp. "
				 "Failing LS Req\n", pnvme_rport);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not alloc LS request bmp "
				 "for DID x%x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not alloc mbuf for LS request "
				 "to DID x%x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
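
	/* Build a two-entry BPL: the first BDE maps the LS request
	 * payload; the second maps the buffer that receives the LS
	 * response.
	 */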
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID,
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&lport->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		atomic_inc(&lport->xmt_ls_err);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport the LS request was sent to
 * @pnvme_lsreq: Pointer to the transport's LS request to abort
 *
 * Driver registers this routine to abort a previously issued link
 * service request from the nvme_fc transport to a remote port.
 *
 * Return value : None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes.  Also, use the nvme
	 * command's command and response dma addresses rather than
	 * the virtual memory to ease the restore operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

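		/* Note: the immediate BDE size of 56 bytes matches the 14
		 * payload words copied into WQE words 16-29 below, and
		 * addrLow 64 is the byte offset of Word 16 within the WQE.
		 */
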
		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;
	uint64_t segsum;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from when the last FCP command cmpl was handed
	 * off to the NVME Layer to the start of the next command.
	 * Segment 2 - Time from when the driver receives an IO cmd start
	 * from the NVME Layer to when the WQ put is done for the IO cmd.
	 * Segment 3 - Time from when the driver WQ put is done for the IO
	 * cmd to the MSI-X ISR for the IO cmpl.
	 * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when the
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
		seg1 = 0;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	segsum = seg2;
	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg4)
		return;
	seg4 -= segsum;

	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to HBA context object
 * @pwqeIn: Pointer to the command WQE that completed
 * @wcqe: Pointer to the work completion queue entry
 *
 * Completion handler for an NVME FCP IO.  Translates the WCQE status
 * for the transport (rebuilding the NVME ERSP IU for CQE_CODE_NVME_ERSP
 * completions), updates statistics, invokes the transport's done()
 * callback, and releases the IO buffer.
 *
 * Return value : None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx, cpu;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd) {
		if (!lpfc_ncmd) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_NODE | LOG_NVME_IOERR,
					 "6071 Null lpfc_ncmd pointer. No "
					 "release, skip completion\n");
			return;
		}

		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
				 "nvmeCmd %p\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport && status) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd %p "
						 "nvme cmd %p, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
			/* fall through */
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

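	/* Note: the XB bit in the WCQE indicates the exchange is still
	 * busy on the wire.  When set, the IO buffer is held on the
	 * abort list by lpfc_release_nvme_buf (see below) until the
	 * port completes the exchange cleanup.
	 */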
	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
		cpu = smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			if (lpfc_ncmd->cpu != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6701 CPU Check cmpl: "
						 "cpu %d expect %d\n",
						 cpu, lpfc_ncmd->cpu);
			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
		}
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		nCmd->done(nCmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: Pointer to the virtual port issuing the IO
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
 * @pnode: Pointer to the remote node the IO is addressed to
 * @cstat: Pointer to the hardware queue's FC4 statistics
 *
 * Initializes the fixed WQE words from the iread/iwrite/icmnd template
 * that matches the IO direction, then fills in the per-IO fields that
 * are independent of the request's scatter-gather list.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node is not active
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: Pointer to the virtual port issuing the IO
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
 *
 * Fixes up the command/response SGEs and walks the transport's
 * scatter-gather list, formatting a driver SGE for each data segment.
 *
 * Return value :
 *   0 - Success
 *   1 - The SG list is invalid or exceeds the supported segment count
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
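
		/* If enabled, a PBDE describing the first data SGE is also
		 * placed in WQE words 13-15, which lets the adapter start
		 * that transfer without first fetching the external SGL
		 * (a fast-path optimization).
		 */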
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the IO request
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   non-zero - A negative errno on validation or resource failure
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Fail IO, ndlp not ready: rport %p "
				 "ndlp %p, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->upcall_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

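	/* Pull an IO buffer from the selected hardware queue's pool.  The
	 * expedite flag allows admin-queue keep-alive commands to still
	 * obtain a buffer when the pool is nearly exhausted (see the
	 * comment above).
	 */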
1557 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
01649561 1558 if (lpfc_ncmd == NULL) {
4b056682 1559 atomic_inc(&lport->xmt_fcp_noxri);
01649561 1560 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
44c2757b
JS
1561 "6065 Fail IO, driver buffer pool is empty: "
1562 "idx %d DID %x\n",
1563 lpfc_queue_info->index, ndlp->nlp_DID);
cd22d605 1564 ret = -EBUSY;
01649561
JS
1565 goto out_fail;
1566 }
bd2cdd5e 1567#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
c8a4ce0b 1568 if (start) {
bd2cdd5e
JS
1569 lpfc_ncmd->ts_cmd_start = start;
1570 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
c8a4ce0b
DK
1571 } else {
1572 lpfc_ncmd->ts_cmd_start = 0;
bd2cdd5e
JS
1573 }
1574#endif
01649561
JS
1575
1576 /*
1577 * Store the data needed by the driver to issue, abort, and complete
1578 * an IO.
1579 * Do not let the IO hang out forever. There is no midlayer issuing
1580 * an abort so inform the FW of the maximum IO pending time.
1581 */
bbe3012b 1582 freqpriv->nvme_buf = lpfc_ncmd;
01649561 1583 lpfc_ncmd->nvmeCmd = pnvme_fcreq;
318083ad 1584 lpfc_ncmd->ndlp = ndlp;
0794d601 1585 lpfc_ncmd->qidx = lpfc_queue_info->qidx;
01649561 1586
01649561
JS
1587 /*
1588 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1589 * This identfier was create in our hardware queue create callback
1590 * routine. The driver now is dependent on the IO queue steering from
1591 * the transport. We are trusting the upper NVME layers know which
1592 * index to use and that they have affinitized a CPU to this hardware
1593 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1594 */
66a210ff 1595 lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
4c47efc1 1596 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
66a210ff
JS
1597
1598 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1599 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1600 if (ret) {
44c2757b
JS
1601 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1602 "6175 Fail IO, Prep DMA: "
1603 "idx %d DID %x\n",
1604 lpfc_queue_info->index, ndlp->nlp_DID);
1605 atomic_inc(&lport->xmt_fcp_err);
66a210ff
JS
1606 ret = -ENOMEM;
1607 goto out_free_nvme_buf;
1608 }
1609
bd2cdd5e
JS
1610 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1611 lpfc_ncmd->cur_iocbq.sli4_xritag,
1612 lpfc_queue_info->index, ndlp->nlp_DID);
1613
1fbf9742 1614 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
01649561 1615 if (ret) {
4b056682 1616 atomic_inc(&lport->xmt_fcp_wqerr);
e3246a12 1617 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
44c2757b 1618 "6113 Fail IO, Could not issue WQE err %x "
01649561
JS
1619 "sid: x%x did: x%x oxid: x%x\n",
1620 ret, vport->fc_myDID, ndlp->nlp_DID,
1621 lpfc_ncmd->cur_iocbq.sli4_xritag);
01649561
JS
1622 goto out_free_nvme_buf;
1623 }
1624
c490850a
JS
1625 if (phba->cfg_xri_rebalancing)
1626 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1627
bd2cdd5e 1628#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
c8a4ce0b 1629 if (lpfc_ncmd->ts_cmd_start)
bd2cdd5e
JS
1630 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1631
1632 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
63df6d63
JS
1633 cpu = smp_processor_id();
1634 if (cpu < LPFC_CHECK_CPU_CNT) {
1635 lpfc_ncmd->cpu = cpu;
1636 if (idx != cpu)
bd2cdd5e 1637 lpfc_printf_vlog(vport,
63df6d63 1638 KERN_INFO, LOG_NVME_IOERR,
bd2cdd5e
JS
1639 "6702 CPU Check cmd: "
1640 "cpu %d wq %d\n",
1641 lpfc_ncmd->cpu,
1642 lpfc_queue_info->index);
63df6d63 1643 phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
bd2cdd5e 1644 }
bd2cdd5e
JS
1645 }
1646#endif
01649561
JS
1647 return 0;
1648
1649 out_free_nvme_buf:
2cee7808
JS
1650 if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1651 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
4c47efc1 1652 cstat->output_requests--;
2cee7808 1653 else
4c47efc1 1654 cstat->input_requests--;
2cee7808 1655 } else
4c47efc1 1656 cstat->control_requests--;
01649561
JS
1657 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1658 out_fail:
1659 return ret;
1660}
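
/* Illustrative stand-alone sketch (not driver code): the submit path above
 * selects a hardware queue either from the transport-supplied index or from
 * a per-CPU affinity map, depending on the fcp_io_sched policy. All names
 * and table sizes below are hypothetical; the sketch only models the
 * selection logic.
 */
#include <stdio.h>

#define SCHED_BY_HDWQ	1
#define SCHED_BY_CPU	0
#define NUM_CPUS	8

/* Models phba->sli4_hba.cpu_map[]: each CPU is affinitized to one queue. */
static const int cpu_to_hdwq[NUM_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };

static int pick_hdwq(int policy, int transport_idx, int cpu)
{
	if (policy == SCHED_BY_HDWQ)
		return transport_idx;	/* trust the transport's steering */
	return cpu_to_hdwq[cpu];	/* otherwise follow CPU affinity */
}

int main(void)
{
	printf("queue=%d\n", pick_hdwq(SCHED_BY_CPU, 2, 5)); /* queue=1 */
	return 0;
}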
1661
1662/**
1663 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1664 * @phba: Pointer to HBA context object.
1665 * @cmdiocb: Pointer to command iocb object.
1666 * @abts_cmpl: Pointer to wcqe complete object.
1667 *
1668 * This is the callback function for any NVME FCP IO that was aborted.
1669 *
1670 * Return value:
1671 * None
1672 **/
1673void
1674lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1675 struct lpfc_wcqe_complete *abts_cmpl)
1676{
e3246a12 1677 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
01649561
JS
1678 "6145 ABORT_XRI_CN completing on rpi x%x "
1679 "original iotag x%x, abort cmd iotag x%x "
1680 "req_tag x%x, status x%x, hwstatus x%x\n",
1681 cmdiocb->iocb.un.acxri.abortContextTag,
1682 cmdiocb->iocb.un.acxri.abortIoTag,
1683 cmdiocb->iotag,
1684 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1685 bf_get(lpfc_wcqe_c_status, abts_cmpl),
1686 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1687 lpfc_sli_release_iocbq(phba, cmdiocb);
1688}
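
/* Illustrative stand-alone sketch (not driver code): the completion handler
 * above pulls fields such as the request tag and status out of the WCQE with
 * bf_get(). A simplified model of that shift-and-mask accessor style, with
 * made-up field positions:
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD_SHIFT	8
#define FIELD_MASK	0xff	/* hypothetical 8-bit field at bits 8..15 */

static uint32_t bf_get_model(uint32_t word)
{
	return (word >> FIELD_SHIFT) & FIELD_MASK;
}

static uint32_t bf_set_model(uint32_t word, uint32_t val)
{
	word &= ~((uint32_t)FIELD_MASK << FIELD_SHIFT);
	return word | ((val & FIELD_MASK) << FIELD_SHIFT);
}

int main(void)
{
	uint32_t w = bf_set_model(0, 0x5a);

	printf("0x%x -> 0x%x\n", (unsigned)w, (unsigned)bf_get_model(w));
	return 0;	/* prints 0x5a00 -> 0x5a */
}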
1689
1690/**
1691 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1692 * @pnvme_lport: Pointer to the driver's local port data
1693 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
1694 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1695 * @pnvme_fcreq: IO request from nvme fc to driver.
1696 *
1697 * Driver registers this routine as its nvme request io abort handler. This
1698 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
1699 * data structure to the rport indicated in @pnvme_rport. This routine
1700 * is executed asynchronously - once the target is validated as "MAPPED" and
1701 * ready for IO, the driver issues the abort request and returns.
1703 *
1704 * Return value:
1705 * None
1706 **/
1707static void
1708lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1709 struct nvme_fc_remote_port *pnvme_rport,
1710 void *hw_queue_handle,
1711 struct nvmefc_fcp_req *pnvme_fcreq)
1712{
1713 struct lpfc_nvme_lport *lport;
1714 struct lpfc_vport *vport;
1715 struct lpfc_hba *phba;
c490850a 1716 struct lpfc_io_buf *lpfc_nbuf;
01649561
JS
1717 struct lpfc_iocbq *abts_buf;
1718 struct lpfc_iocbq *nvmereq_wqe;
c3725bdc 1719 struct lpfc_nvme_fcpreq_priv *freqpriv;
205e8240 1720 union lpfc_wqe128 *abts_wqe;
01649561
JS
1721 unsigned long flags;
1722 int ret_val;
1723
c3725bdc
JS
1724	/* Validate pointers. LLDD fault handling with the transport does
1725 * have timing races.
1726 */
01649561 1727 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
c3725bdc
JS
1728 if (unlikely(!lport))
1729 return;
1730
01649561 1731 vport = lport->vport;
c3725bdc
JS
1732
1733 if (unlikely(!hw_queue_handle)) {
1734 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1735 "6129 Fail Abort, HW Queue Handle NULL.\n");
1736 return;
1737 }
1738
01649561 1739 phba = vport->phba;
c3725bdc 1740 freqpriv = pnvme_fcreq->private;
01649561 1741
c3725bdc
JS
1742 if (unlikely(!freqpriv))
1743 return;
3386f4bd
JS
1744 if (vport->load_flag & FC_UNLOADING)
1745 return;
1746
01649561 1747	/* Announce entry to the abort handler. */
86c67379 1748 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
01649561
JS
1749 "6002 Abort Request to rport DID x%06x "
1750 "for nvme_fc_req %p\n",
1751 pnvme_rport->port_id,
1752 pnvme_fcreq);
1753
01649561
JS
1754 /* If the hba is getting reset, this flag is set. It is
1755 * cleared when the reset is complete and rings reestablished.
1756 */
1757 spin_lock_irqsave(&phba->hbalock, flags);
1758 /* driver queued commands are in process of being flushed */
1759 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1760 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1761 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1762 "6139 Driver in reset cleanup - flushing "
1763 "NVME Req now. hba_flag x%x\n",
1764 phba->hba_flag);
1765 return;
1766 }
1767
bbe3012b 1768 lpfc_nbuf = freqpriv->nvme_buf;
01649561
JS
1769 if (!lpfc_nbuf) {
1770 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1771 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1772 "6140 NVME IO req has no matching lpfc nvme "
1773 "io buffer. Skipping abort req.\n");
1774 return;
1775 } else if (!lpfc_nbuf->nvmeCmd) {
1776 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1777 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1778 "6141 lpfc NVME IO req has no nvme_fcreq "
1779 "io buffer. Skipping abort req.\n");
1780 return;
1781 }
2b7824d0 1782 nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
01649561
JS
1783
1784 /*
1785 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1786 * state must match the nvme_fcreq passed by the nvme
1787 * transport. If they don't match, it is likely the driver
1788 * has already completed the NVME IO and the nvme transport
1789 * has not seen it yet.
1790 */
1791 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1792 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1793 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1794 "6143 NVME req mismatch: "
1795 "lpfc_nbuf %p nvmeCmd %p, "
2b7824d0 1796 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
01649561 1797 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
2b7824d0 1798 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
01649561
JS
1799 return;
1800 }
1801
1802 /* Don't abort IOs no longer on the pending queue. */
01649561
JS
1803 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1804 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1805 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561 1806 "6142 NVME IO req %p not queued - skipping "
2b7824d0
JS
1807 "abort req xri x%x\n",
1808 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
01649561
JS
1809 return;
1810 }
1811
4b056682 1812 atomic_inc(&lport->xmt_fcp_abort);
bd2cdd5e
JS
1813 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1814 nvmereq_wqe->sli4_xritag,
00cefeb9 1815 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
bd2cdd5e 1816
01649561
JS
1817 /* Outstanding abort is in progress */
1818 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1819 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1820 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1821 "6144 Outstanding NVME I/O Abort Request "
1822 "still pending on nvme_fcreq %p, "
2b7824d0
JS
1823 "lpfc_ncmd %p xri x%x\n",
1824 pnvme_fcreq, lpfc_nbuf,
1825 nvmereq_wqe->sli4_xritag);
01649561
JS
1826 return;
1827 }
1828
1829 abts_buf = __lpfc_sli_get_iocbq(phba);
1830 if (!abts_buf) {
1831 spin_unlock_irqrestore(&phba->hbalock, flags);
86c67379 1832 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561 1833 "6136 No available abort wqes. Skipping "
2b7824d0
JS
1834 "Abts req for nvme_fcreq %p xri x%x\n",
1835 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
01649561
JS
1836 return;
1837 }
1838
1839 /* Ready - mark outstanding as aborted by driver. */
1840 nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1841
1842 /* Complete prepping the abort wqe and issue to the FW. */
1843 abts_wqe = &abts_buf->wqe;
1844
1845 /* WQEs are reused. Clear stale data and set key fields to
1846 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1847 */
1848 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1849 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1850
1851 /* word 7 */
01649561
JS
1852 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1853 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1854 nvmereq_wqe->iocb.ulpClass);
1855
1856 /* word 8 - tell the FW to abort the IO associated with this
1857 * outstanding exchange ID.
1858 */
1859 abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1860
1861 /* word 9 - this is the iotag for the abts_wqe completion. */
1862 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1863 abts_buf->iotag);
1864
1865 /* word 10 */
01649561
JS
1866 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1867 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1868
1869 /* word 11 */
1870 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1871 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1872 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1873
1874 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1875 abts_buf->iocb_flag |= LPFC_IO_NVME;
1876 abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1877 abts_buf->vport = vport;
1878 abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1fbf9742 1879 ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
01649561 1880 spin_unlock_irqrestore(&phba->hbalock, flags);
cd22d605 1881 if (ret_val) {
86c67379 1882 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
01649561
JS
1883 "6137 Failed abts issue_wqe with status x%x "
1884 "for nvme_fcreq %p.\n",
1885 ret_val, pnvme_fcreq);
1886 lpfc_sli_release_iocbq(phba, abts_buf);
1887 return;
1888 }
1889
86c67379 1890 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
2b7824d0 1891 "6138 Transport Abort NVME Request Issued for "
01649561
JS
1892 "ox_id x%x on reqtag x%x\n",
1893 nvmereq_wqe->sli4_xritag,
1894 abts_buf->iotag);
1895}
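
/* Illustrative stand-alone sketch (not driver code): the abort path above
 * validates the buffer/request pairing under phba->hbalock and drops the
 * lock on every early exit. A simplified userspace model of that
 * check-then-act pattern, with hypothetical names:
 */
#include <pthread.h>
#include <stdio.h>

struct io_ctx {
	pthread_mutex_t lock;
	void *active_req;	/* models lpfc_nbuf->nvmeCmd */
	int aborted;		/* models LPFC_DRIVER_ABORTED */
};

/* Return 0 if the abort was marked, -1 if the IO raced away from us. */
static int try_abort(struct io_ctx *ctx, void *req)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->active_req != req || ctx->aborted) {
		pthread_mutex_unlock(&ctx->lock);
		return -1;	/* completed or already being aborted */
	}
	ctx->aborted = 1;	/* mark while still under the lock */
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

int main(void)
{
	struct io_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, (void *)0x1, 0 };
	int first = try_abort(&ctx, (void *)0x1);
	int second = try_abort(&ctx, (void *)0x1);

	printf("%d %d\n", first, second);	/* prints 0 -1 */
	return 0;
}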
1896
1897/* Declare and initialize an instance of the FC NVME template. */
1898static struct nvme_fc_port_template lpfc_nvme_template = {
1899 /* initiator-based functions */
1900 .localport_delete = lpfc_nvme_localport_delete,
1901 .remoteport_delete = lpfc_nvme_remoteport_delete,
1902 .create_queue = lpfc_nvme_create_queue,
1903 .delete_queue = lpfc_nvme_delete_queue,
1904 .ls_req = lpfc_nvme_ls_req,
1905 .fcp_io = lpfc_nvme_fcp_io_submit,
1906 .ls_abort = lpfc_nvme_ls_abort,
1907 .fcp_abort = lpfc_nvme_fcp_abort,
1908
1909 .max_hw_queues = 1,
1910 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1911 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1912 .dma_boundary = 0xFFFFFFFF,
1913
1914 /* Sizes of additional private data for data structures.
1915	 * Only the LS request size is unused at this time; a stand-alone
	 * sketch of how these sizes are consumed follows this template.
1916 */
1917 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1918 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1919 .lsrqst_priv_sz = 0,
bbe3012b 1920 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
01649561
JS
1921};
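
/* Illustrative stand-alone sketch (not driver code): the *_priv_sz fields in
 * the template above tell the transport how much extra memory to allocate
 * alongside its own object; ->private then points at that area (this is how
 * localport->private carries the lpfc_nvme_lport). The model below is a
 * guess at the general pattern, not the nvme-fc implementation:
 */
#include <stdio.h>
#include <stdlib.h>

struct localport_model {	/* stands in for struct nvme_fc_local_port */
	int port_num;
	void *private;		/* driver's private area lives here */
};

static struct localport_model *register_localport_model(size_t priv_sz)
{
	/* one allocation covers the object plus the private area */
	struct localport_model *lp = calloc(1, sizeof(*lp) + priv_sz);

	if (lp)
		lp->private = lp + 1;
	return lp;
}

int main(void)
{
	struct localport_model *lp = register_localport_model(64);

	if (lp)
		printf("private follows object: %d\n",
		       lp->private == (void *)(lp + 1));
	free(lp);
	return 0;
}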
1922
01649561 1923/**
5e5b511d 1924 * lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
01649561
JS
1925 * @phba: The HBA for which this call is being executed.
 * @ndlp: Pointer to the node this IO is destined for.
 * @idx: Hardware queue index used to select the io_buf_list.
 * @expedite: Nonzero to allow allocation from the reserved buffer pool.
1926 *
5e5b511d 1927 * This routine removes a nvme buffer from head of @hdwq io_buf_list
01649561
JS
1928 * and returns it to the caller.
1929 *
1930 * Return codes:
1931 * NULL - Error
1932 * Pointer to lpfc_io_buf - Success
1933 **/
c490850a 1934static struct lpfc_io_buf *
cf1a1d3e 1935lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
5e5b511d 1936 int idx, int expedite)
01649561 1937{
c490850a 1938 struct lpfc_io_buf *lpfc_ncmd;
5e5b511d 1939 struct lpfc_sli4_hdw_queue *qp;
0794d601
JS
1940 struct sli4_sge *sgl;
1941 struct lpfc_iocbq *pwqeq;
1942 union lpfc_wqe128 *wqe;
01649561 1943
c490850a 1944 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2a5b7d62 1945
0794d601
JS
1946 if (lpfc_ncmd) {
1947 pwqeq = &(lpfc_ncmd->cur_iocbq);
1948 wqe = &pwqeq->wqe;
1949
1950 /* Setup key fields in buffer that may have been changed
1951 * if other protocols used this buffer.
1952 */
1953 pwqeq->iocb_flag = LPFC_IO_NVME;
1954 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1955 lpfc_ncmd->start_time = jiffies;
1956 lpfc_ncmd->flags = 0;
1957
1958 /* Rsp SGE will be filled in when we rcv an IO
1959 * from the NVME Layer to be sent.
1960 * The cmd is going to be embedded so we need a SKIP SGE.
1961 */
1962 sgl = lpfc_ncmd->dma_sgl;
1963 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1964 bf_set(lpfc_sli4_sge_last, sgl, 0);
1965 sgl->word2 = cpu_to_le32(sgl->word2);
1966 /* Fill in word 3 / sgl_len during cmd submission */
1967
1968 /* Initialize WQE */
1969 memset(wqe, 0, sizeof(union lpfc_wqe));
1970
1971 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1972 atomic_inc(&ndlp->cmd_pending);
c490850a 1973 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
0794d601 1974 }
5e5b511d 1975
c490850a
JS
1976 } else {
1977 qp = &phba->sli4_hba.hdwq[idx];
5e5b511d 1978 qp->empty_io_bufs++;
c490850a 1979 }
5e5b511d 1980
01649561
JS
1981 return lpfc_ncmd;
1982}
1983
1984/**
1985 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
1986 * @phba: The HBA for which this call is being executed.
1987 * @lpfc_ncmd: The nvme buffer which is being released.
1988 *
1989 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
5e5b511d 1990 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
01649561
JS
1991 * and cannot be reused for at least RA_TOV amount of time if it was
1992 * aborted.
1993 **/
1994static void
c490850a 1995lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
01649561 1996{
5e5b511d 1997 struct lpfc_sli4_hdw_queue *qp;
01649561
JS
1998 unsigned long iflag = 0;
1999
c490850a 2000 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2a5b7d62
JS
2001 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2002
2a5b7d62 2003 lpfc_ncmd->ndlp = NULL;
c490850a 2004 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2a5b7d62 2005
1fbf9742 2006 qp = lpfc_ncmd->hdwq;
318083ad 2007 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
86c67379
JS
2008 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2009 "6310 XB release deferred for "
2010 "ox_id x%x on reqtag x%x\n",
2011 lpfc_ncmd->cur_iocbq.sli4_xritag,
2012 lpfc_ncmd->cur_iocbq.iotag);
2013
5e5b511d 2014 spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
01649561 2015 list_add_tail(&lpfc_ncmd->list,
5e5b511d
JS
2016 &qp->lpfc_abts_nvme_buf_list);
2017 qp->abts_nvme_io_bufs++;
2018 spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
c490850a
JS
2019 } else
2020 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
01649561
JS
2021}
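
/* Illustrative stand-alone sketch (not driver code): the BUMP_QDEPTH flag
 * pairs the atomic_inc() taken in lpfc_get_nvme_buf() with the atomic_dec()
 * above so the per-node pending count stays balanced even when buffers are
 * released on a different path. A minimal model using C11 atomics, all
 * names hypothetical:
 */
#include <stdatomic.h>
#include <stdio.h>

#define BUMP_QDEPTH	0x1

struct node { atomic_int cmd_pending; };
struct buf { unsigned int flags; struct node *ndlp; };

static void get_buf(struct buf *b, struct node *n)
{
	b->ndlp = n;
	atomic_fetch_add(&n->cmd_pending, 1);
	b->flags |= BUMP_QDEPTH;	/* remember that we bumped the count */
}

static void release_buf(struct buf *b)
{
	if ((b->flags & BUMP_QDEPTH) && b->ndlp)
		atomic_fetch_sub(&b->ndlp->cmd_pending, 1);
	b->flags &= ~BUMP_QDEPTH;
	b->ndlp = NULL;
}

int main(void)
{
	struct node n = { 0 };
	struct buf b = { 0, NULL };

	get_buf(&b, &n);
	release_buf(&b);
	printf("pending=%d\n", atomic_load(&n.cmd_pending)); /* pending=0 */
	return 0;
}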
2022
2023/**
2024 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2025 * @vport: the lpfc_vport instance requesting a localport.
2026 *
2027 * This routine is invoked to create an nvme localport instance to bind
2028 * to the nvme_fc_transport. It is called once during driver load
2029 * like lpfc_create_shost after all other services are initialized.
2030 * It requires a vport, vpi, and wwns at call time. Other localport
2031 * parameters are modified as the driver's FCID and the Fabric WWN
2032 * are established.
2033 *
2034 * Return codes
2035 * 0 - successful
2036 * -ENOMEM - no heap memory available
2037 * other values - from nvme registration upcall
2038 **/
2039int
2040lpfc_nvme_create_localport(struct lpfc_vport *vport)
2041{
166d7211 2042 int ret = 0;
01649561
JS
2043 struct lpfc_hba *phba = vport->phba;
2044 struct nvme_fc_port_info nfcp_info;
2045 struct nvme_fc_local_port *localport;
2046 struct lpfc_nvme_lport *lport;
01649561
JS
2047
2048 /* Initialize this localport instance. The vport wwn usage ensures
2049 * that NPIV is accounted for.
2050 */
2051 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2052 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2053 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2054 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2055
5b9e70b2
JS
2056 /* We need to tell the transport layer + 1 because it takes page
2057 * alignment into account. When space for the SGL is allocated we
2058	 * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
4d4c4a4a 2059 */
4d4c4a4a 2060 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
6a828b0f
JS
2061
2062 /* Advertise how many hw queues we support based on fcp_io_sched */
2063 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
2064 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2065 else
2066 lpfc_nvme_template.max_hw_queues =
2067 phba->sli4_hba.num_present_cpu;
01649561
JS
2068
2069	/* The port info is built on the stack, but the registration
2070	 * call allocates the localport and its private area from the heap.
2071 */
7d708033 2072#if (IS_ENABLED(CONFIG_NVME_FC))
01649561
JS
2073 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2074 &vport->phba->pcidev->dev, &localport);
166d7211
JS
2075#else
2076 ret = -ENOMEM;
2077#endif
01649561
JS
2078 if (!ret) {
2079 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2080 "6005 Successfully registered local "
2081 "NVME port num %d, localP %p, private %p, "
2082 "sg_seg %d\n",
2083 localport->port_num, localport,
2084 localport->private,
2085 lpfc_nvme_template.max_sgl_segments);
2086
2087		/* Private is our lport area, sized as declared in the template. */
2088 lport = (struct lpfc_nvme_lport *)localport->private;
2089 vport->localport = localport;
2090 lport->vport = vport;
01649561 2091 vport->nvmei_support = 1;
6b486ce9 2092
4b056682
JS
2093 atomic_set(&lport->xmt_fcp_noxri, 0);
2094 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2095 atomic_set(&lport->xmt_fcp_qdepth, 0);
44c2757b 2096 atomic_set(&lport->xmt_fcp_err, 0);
4b056682
JS
2097 atomic_set(&lport->xmt_fcp_wqerr, 0);
2098 atomic_set(&lport->xmt_fcp_abort, 0);
2099 atomic_set(&lport->xmt_ls_abort, 0);
2100 atomic_set(&lport->xmt_ls_err, 0);
2101 atomic_set(&lport->cmpl_fcp_xb, 0);
2102 atomic_set(&lport->cmpl_fcp_err, 0);
2103 atomic_set(&lport->cmpl_ls_xb, 0);
2104 atomic_set(&lport->cmpl_ls_err, 0);
66a210ff
JS
2105 atomic_set(&lport->fc4NvmeLsRequests, 0);
2106 atomic_set(&lport->fc4NvmeLsCmpls, 0);
4c47efc1 2107 }
01649561 2108
01649561
JS
2109 return ret;
2110}
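
/* Illustrative stand-alone sketch (not driver code): the
 * IS_ENABLED(CONFIG_NVME_FC) guard above compiles the registration call out
 * entirely when the transport is not built, leaving ret = -ENOMEM. A
 * userspace model of the same compile-time gating, with a hypothetical
 * config macro:
 */
#include <errno.h>
#include <stdio.h>

#define CONFIG_NVME_FC_MODEL 1	/* flip to 0 to model a disabled transport */

static int register_localport_stub(void)
{
#if CONFIG_NVME_FC_MODEL
	return 0;		/* transport present: pretend success */
#else
	return -ENOMEM;		/* matches the driver's fallback above */
#endif
}

int main(void)
{
	printf("ret=%d\n", register_localport_stub());
	return 0;
}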
2111
add9d6be
JS
2112/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2113 *
2114 * The driver has to wait for the host nvme transport to callback
2115 * indicating the localport has successfully unregistered all
2116 * resources. Since this is an uninterruptible wait, loop every ten
2117 * seconds and print a message indicating no progress.
2118 *
2119 * An uninterruptible wait is used because of the risk of transport-to-
2120 * driver state mismatch.
2121 */
2122void
2123lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2124 struct lpfc_nvme_lport *lport)
2125{
2126#if (IS_ENABLED(CONFIG_NVME_FC))
2127 u32 wait_tmo;
2128 int ret;
2129
2130	/* The host transport has to clean up and confirm, requiring an indefinite
2131 * wait. Print a message if a 10 second wait expires and renew the
2132 * wait. This is unexpected.
2133 */
2134 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2135 while (true) {
2136 ret = wait_for_completion_timeout(&lport->lport_unreg_done,
2137 wait_tmo);
2138 if (unlikely(!ret)) {
2139 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2140 "6176 Lport %p Localport %p wait "
2141 "timed out. Renewing.\n",
2142 lport, vport->localport);
2143 continue;
2144 }
2145 break;
2146 }
2147 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2148 "6177 Lport %p Localport %p Complete Success\n",
2149 lport, vport->localport);
2150#endif
2151}
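
/* Illustrative stand-alone sketch (not driver code): the wait above renews
 * an uninterruptible timed wait indefinitely, logging each expiry. A
 * userspace model of the same renew-and-log loop built on a POSIX semaphore
 * instead of a kernel completion:
 */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static void wait_with_renewal(sem_t *done, unsigned int period_sec)
{
	struct timespec ts;

	for (;;) {
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += period_sec;
		if (sem_timedwait(done, &ts) == 0)
			break;			/* completion arrived */
		if (errno == ETIMEDOUT)
			fprintf(stderr, "wait timed out, renewing\n");
	}
}

int main(void)
{
	sem_t done;

	sem_init(&done, 0, 1);	/* already signaled: returns immediately */
	wait_with_renewal(&done, 10);
	sem_destroy(&done);
	return 0;
}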
2152
01649561
JS
2153/**
2154 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2155 * @vport: pointer to the lpfc vport data structure.
2156 *
2157 * This routine is invoked to destroy all lports bound to the phba.
2158 * The lport memory was allocated by the nvme fc transport and is
2159 * released there. This routine ensures all rports bound to the
2160 * lport have been disconnected.
2161 *
2162 **/
2163void
2164lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2165{
7d708033 2166#if (IS_ENABLED(CONFIG_NVME_FC))
01649561
JS
2167 struct nvme_fc_local_port *localport;
2168 struct lpfc_nvme_lport *lport;
01649561
JS
2169 int ret;
2170
2171 if (vport->nvmei_support == 0)
2172 return;
2173
2174 localport = vport->localport;
2175 vport->localport = NULL;
2176 lport = (struct lpfc_nvme_lport *)localport->private;
2177
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2179 "6011 Destroying NVME localport %p\n",
2180 localport);
166d7211 2181
01649561
JS
2182 /* lport's rport list is clear. Unregister
2183 * lport and release resources.
2184 */
2185 init_completion(&lport->lport_unreg_done);
2186 ret = nvme_fc_unregister_localport(localport);
add9d6be
JS
2187
2188 /* Wait for completion. This either blocks
2189 * indefinitely or succeeds
2190 */
2191 lpfc_nvme_lport_unreg_wait(vport, lport);
01649561
JS
2192
2193 /* Regardless of the unregister upcall response, clear
2194 * nvmei_support. All rports are unregistered and the
2195 * driver will clean up.
2196 */
2197 vport->nvmei_support = 0;
2198 if (ret == 0) {
2199 lpfc_printf_vlog(vport,
2200 KERN_INFO, LOG_NVME_DISC,
2201 "6009 Unregistered lport Success\n");
2202 } else {
2203 lpfc_printf_vlog(vport,
2204 KERN_INFO, LOG_NVME_DISC,
2205 "6010 Unregistered lport "
2206 "Failed, status x%x\n",
2207 ret);
2208 }
166d7211 2209#endif
01649561
JS
2210}
2211
2212void
2213lpfc_nvme_update_localport(struct lpfc_vport *vport)
2214{
4410a67a 2215#if (IS_ENABLED(CONFIG_NVME_FC))
01649561
JS
2216 struct nvme_fc_local_port *localport;
2217 struct lpfc_nvme_lport *lport;
2218
2219 localport = vport->localport;
4410a67a
JS
2220 if (!localport) {
2221 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2222 "6710 Update NVME fail. No localport\n");
2223 return;
2224 }
01649561 2225 lport = (struct lpfc_nvme_lport *)localport->private;
4410a67a
JS
2226 if (!lport) {
2227 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2228 "6171 Update NVME fail. localP %p, No lport\n",
2229 localport);
2230 return;
2231 }
01649561
JS
2232 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2233 "6012 Update NVME lport %p did x%x\n",
2234 localport, vport->fc_myDID);
2235
2236 localport->port_id = vport->fc_myDID;
2237 if (localport->port_id == 0)
2238 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2239 else
2240 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2241
2242 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2243 "6030 bound lport %p to DID x%06x\n",
2244 lport, localport->port_id);
4410a67a 2245#endif
01649561
JS
2246}
2247
2248int
2249lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2250{
7d708033 2251#if (IS_ENABLED(CONFIG_NVME_FC))
01649561
JS
2252 int ret = 0;
2253 struct nvme_fc_local_port *localport;
2254 struct lpfc_nvme_lport *lport;
2255 struct lpfc_nvme_rport *rport;
01466024 2256 struct lpfc_nvme_rport *oldrport;
01649561
JS
2257 struct nvme_fc_remote_port *remote_port;
2258 struct nvme_fc_port_info rpinfo;
93a3922d 2259 struct lpfc_nodelist *prev_ndlp = NULL;
01649561
JS
2260
2261 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2262 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2263 ndlp->nlp_DID, ndlp->nlp_type);
2264
2265 localport = vport->localport;
bb6a8a2c
DK
2266 if (!localport)
2267 return 0;
2268
01649561
JS
2269 lport = (struct lpfc_nvme_lport *)localport->private;
2270
7a06dcd3
JS
2271 /* NVME rports are not preserved across devloss.
2272 * Just register this instance. Note, rpinfo->dev_loss_tmo
2273 * is left 0 to indicate accept transport defaults. The
2274 * driver communicates port role capabilities consistent
2275 * with the PRLI response data.
2276 */
2277 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2278 rpinfo.port_id = ndlp->nlp_DID;
2279 if (ndlp->nlp_type & NLP_NVME_TARGET)
2280 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2281 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2282 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2283
2284 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2285 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2286
2287 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2288 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
01466024 2289
9e210178 2290 spin_lock_irq(&vport->phba->hbalock);
01466024 2291 oldrport = lpfc_ndlp_get_nrport(ndlp);
9e210178 2292 spin_unlock_irq(&vport->phba->hbalock);
01466024 2293 if (!oldrport)
3fd78355
JS
2294 lpfc_nlp_get(ndlp);
2295
7a06dcd3
JS
2296 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2297 if (!ret) {
2298 /* If the ndlp already has an nrport, this is just
2299 * a resume of the existing rport. Else this is a
2300 * new rport.
01649561 2301 */
b15bd3e6
JS
2302 /* Guard against an unregister/reregister
2303 * race that leaves the WAIT flag set.
2304 */
2305 spin_lock_irq(&vport->phba->hbalock);
2306 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2307 spin_unlock_irq(&vport->phba->hbalock);
7a06dcd3 2308 rport = remote_port->private;
01466024 2309 if (oldrport) {
93a3922d
JS
2310 /* New remoteport record does not guarantee valid
2311 * host private memory area.
2312 */
2313 prev_ndlp = oldrport->ndlp;
01466024 2314 if (oldrport == remote_port->private) {
93a3922d
JS
2315 /* Same remoteport - ndlp should match.
2316 * Just reuse.
2317 */
3fd78355
JS
2318 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2319 LOG_NVME_DISC,
2320 "6014 Rebinding lport to "
2321 "remoteport %p wwpn 0x%llx, "
93a3922d 2322 "Data: x%x x%x %p %p x%x x%06x\n",
3fd78355
JS
2323 remote_port,
2324 remote_port->port_name,
2325 remote_port->port_id,
2326 remote_port->port_role,
93a3922d 2327 prev_ndlp,
3fd78355
JS
2328 ndlp,
2329 ndlp->nlp_type,
2330 ndlp->nlp_DID);
2331 return 0;
2332 }
2b75d0f9 2333
3fd78355
JS
2334 /* Sever the ndlp<->rport association
2335 * before dropping the ndlp ref from
2336 * register.
2b75d0f9 2337 */
3fd78355 2338 spin_lock_irq(&vport->phba->hbalock);
2b75d0f9 2339 ndlp->nrport = NULL;
01466024 2340 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
3fd78355 2341 spin_unlock_irq(&vport->phba->hbalock);
2b75d0f9 2342 rport->ndlp = NULL;
3fd78355 2343 rport->remoteport = NULL;
b04744ce
JS
2344
2345 /* Reference only removed if previous NDLP is no longer
2346 * active. It might be just a swap and removing the
2347 * reference would cause a premature cleanup.
2348 */
2349 if (prev_ndlp && prev_ndlp != ndlp) {
2350 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2351 (!prev_ndlp->nrport))
2352 lpfc_nlp_put(prev_ndlp);
2353 }
01649561 2354 }
2b75d0f9
DK
2355
2356 /* Clean bind the rport to the ndlp. */
2357 rport->remoteport = remote_port;
2358 rport->lport = lport;
3fd78355
JS
2359 rport->ndlp = ndlp;
2360 spin_lock_irq(&vport->phba->hbalock);
2b75d0f9 2361 ndlp->nrport = rport;
3fd78355 2362 spin_unlock_irq(&vport->phba->hbalock);
2b75d0f9
DK
2363 lpfc_printf_vlog(vport, KERN_INFO,
2364 LOG_NVME_DISC | LOG_NODE,
2365 "6022 Binding new rport to "
93a3922d 2366 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
2b75d0f9 2367 "Rport WWPN 0x%llx DID "
93a3922d
JS
2368 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
2369 lport, remote_port, rport,
2b75d0f9 2370 rpinfo.node_name, rpinfo.port_name,
3fd78355 2371 rpinfo.port_id, rpinfo.port_role,
93a3922d 2372 ndlp, prev_ndlp);
01649561 2373 } else {
7a06dcd3
JS
2374 lpfc_printf_vlog(vport, KERN_ERR,
2375 LOG_NVME_DISC | LOG_NODE,
2376 "6031 RemotePort Registration failed "
2377 "err: %d, DID x%06x\n",
2378 ret, ndlp->nlp_DID);
01649561 2379 }
7a06dcd3 2380
01649561 2381 return ret;
166d7211
JS
2382#else
2383 return 0;
2384#endif
01649561
JS
2385}
2386
2387/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2388 *
2389 * There is no notion of Devloss or rport recovery from the current
2390 * nvme_transport perspective. Loss of an rport just means IO cannot
2391 * be sent and recovery is completely up to the initator.
2392 * For now, the driver just unbinds the DID and port_role so that
2393 * no further IO can be issued. Changes are planned for later.
2394 *
2395 * Notes - the ndlp reference count is not decremented here since
2396 * there is no nvme_transport api for devloss. The node ref count
2397 * is only adjusted in driver unload.
2398 */
2399void
2400lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2401{
7d708033 2402#if (IS_ENABLED(CONFIG_NVME_FC))
01649561
JS
2403 int ret;
2404 struct nvme_fc_local_port *localport;
2405 struct lpfc_nvme_lport *lport;
2406 struct lpfc_nvme_rport *rport;
9e210178 2407 struct nvme_fc_remote_port *remoteport = NULL;
01649561
JS
2408
2409 localport = vport->localport;
2410
2411 /* This is fundamental error. The localport is always
2412 * available until driver unload. Just exit.
2413 */
2414 if (!localport)
2415 return;
2416
2417 lport = (struct lpfc_nvme_lport *)localport->private;
2418 if (!lport)
2419 goto input_err;
2420
9e210178 2421 spin_lock_irq(&vport->phba->hbalock);
01466024 2422 rport = lpfc_ndlp_get_nrport(ndlp);
9e210178
JS
2423 if (rport)
2424 remoteport = rport->remoteport;
2425 spin_unlock_irq(&vport->phba->hbalock);
2426 if (!remoteport)
01649561
JS
2427 goto input_err;
2428
01649561
JS
2429 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2430 "6033 Unreg nvme remoteport %p, portname x%llx, "
2431 "port_id x%06x, portstate x%x port type x%x\n",
2432 remoteport, remoteport->port_name,
2433 remoteport->port_id, remoteport->port_state,
2434 ndlp->nlp_type);
2435
2436 /* Sanity check ndlp type. Only call for NVME ports. Don't
2437 * clear any rport state until the transport calls back.
2438 */
3b5bde69
JS
2439
2440 if (ndlp->nlp_type & NLP_NVME_TARGET) {
7a06dcd3
JS
2441 /* No concern about the role change on the nvme remoteport.
2442 * The transport will update it.
2443 */
add9d6be 2444 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
7438273f
JS
2445
2446 /* Don't let the host nvme transport keep sending keep-alives
2447 * on this remoteport. Vport is unloading, no recovery. The
2448		 * return value is ignored. The upcall is a courtesy to the
2449 * transport.
2450 */
2451 if (vport->load_flag & FC_UNLOADING)
2452 (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2453
01649561 2454 ret = nvme_fc_unregister_remoteport(remoteport);
3fd78355
JS
2455 if (ret != 0) {
2456 lpfc_nlp_put(ndlp);
01649561
JS
2457 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2458 "6167 NVME unregister failed %d "
2459 "port_state x%x\n",
2460 ret, remoteport->port_state);
3fd78355 2461 }
01649561
JS
2462 }
2463 return;
2464
2465 input_err:
166d7211 2466#endif
01649561 2467 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2b7824d0 2468			 "6168 State error: lport %p, rport %p FCID x%06x\n",
01649561
JS
2469 vport->localport, ndlp->rport, ndlp->nlp_DID);
2470}
318083ad
JS
2471
2472/**
2473 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2474 * @phba: pointer to lpfc hba data structure.
2475 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index of the hardware queue the aborted xri belongs to.
2476 *
2477 * This routine is invoked by the worker thread to process a SLI4 fast-path
952c303b
DK
2478 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
2479 * here.
318083ad
JS
2480 **/
2481void
2482lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
5e5b511d 2483 struct sli4_wcqe_xri_aborted *axri, int idx)
318083ad
JS
2484{
2485 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
c490850a 2486 struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
952c303b 2487 struct nvmefc_fcp_req *nvme_cmd = NULL;
318083ad 2488 struct lpfc_nodelist *ndlp;
5e5b511d 2489 struct lpfc_sli4_hdw_queue *qp;
318083ad 2490 unsigned long iflag = 0;
318083ad
JS
2491
2492 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2493 return;
5e5b511d 2494 qp = &phba->sli4_hba.hdwq[idx];
318083ad 2495 spin_lock_irqsave(&phba->hbalock, iflag);
5e5b511d 2496 spin_lock(&qp->abts_nvme_buf_list_lock);
318083ad 2497 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
5e5b511d 2498 &qp->lpfc_abts_nvme_buf_list, list) {
318083ad 2499 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
bbe3012b 2500 list_del_init(&lpfc_ncmd->list);
5e5b511d 2501 qp->abts_nvme_io_bufs--;
318083ad
JS
2502 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2503 lpfc_ncmd->status = IOSTAT_SUCCESS;
5e5b511d 2504 spin_unlock(&qp->abts_nvme_buf_list_lock);
318083ad 2505
318083ad
JS
2506 spin_unlock_irqrestore(&phba->hbalock, iflag);
2507 ndlp = lpfc_ncmd->ndlp;
952c303b 2508 if (ndlp)
318083ad 2509 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
86c67379
JS
2510
2511 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
952c303b
DK
2512 "6311 nvme_cmd %p xri x%x tag x%x "
2513 "abort complete and xri released\n",
2514 lpfc_ncmd->nvmeCmd, xri,
2515 lpfc_ncmd->cur_iocbq.iotag);
2516
2517 /* Aborted NVME commands are required to not complete
2518 * before the abort exchange command fully completes.
2519			 * Once completed, the buffer is returned via the put list.
2520 */
3fd78355
JS
2521 if (lpfc_ncmd->nvmeCmd) {
2522 nvme_cmd = lpfc_ncmd->nvmeCmd;
2523 nvme_cmd->done(nvme_cmd);
2524 lpfc_ncmd->nvmeCmd = NULL;
2525 }
318083ad 2526 lpfc_release_nvme_buf(phba, lpfc_ncmd);
318083ad
JS
2527 return;
2528 }
2529 }
5e5b511d 2530 spin_unlock(&qp->abts_nvme_buf_list_lock);
318083ad 2531 spin_unlock_irqrestore(&phba->hbalock, iflag);
86c67379
JS
2532
2533 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2534 "6312 XRI Aborted xri x%x not found\n", xri);
2535
318083ad 2536}
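
/* Illustrative stand-alone sketch (not driver code): the
 * list_for_each_entry_safe() walk above saves the next node before testing
 * the current one, so the matching buffer can be unlinked mid-walk. A
 * minimal singly-linked model of that "safe" removal:
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int xri;
	struct entry *next;
};

/* Remove the entry whose xri matches; 'nxt' is saved before any unlink. */
static void remove_xri(struct entry **head, int xri)
{
	struct entry **pp = head, *cur, *nxt;

	for (cur = *head; cur; cur = nxt) {
		nxt = cur->next;
		if (cur->xri == xri) {
			*pp = nxt;
			free(cur);
			return;
		}
		pp = &cur->next;
	}
}

int main(void)
{
	struct entry *b = malloc(sizeof(*b));
	struct entry *a = malloc(sizeof(*a));

	b->xri = 2; b->next = NULL;
	a->xri = 1; a->next = b;
	remove_xri(&a, 2);
	printf("head xri=%d next=%p\n", a->xri, (void *)a->next);
	free(a);
	return 0;
}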
c3725bdc
JS
2537
2538/**
2539 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2540 * @phba: Pointer to HBA context object.
2541 *
2542 * This function flushes all wqes in the nvme rings and frees all resources
2543 * in the txcmplq. This function does not issue abort wqes for the IO
2544 * commands in txcmplq, they will just be returned with
2545 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2546 * slot has been permanently disabled.
2547 **/
2548void
2549lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2550{
2551 struct lpfc_sli_ring *pring;
2552 u32 i, wait_cnt = 0;
2553
cdb42bec 2554 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
c3725bdc
JS
2555 return;
2556
2557 /* Cycle through all NVME rings and make sure all outstanding
2558 * WQEs have been removed from the txcmplqs.
2559 */
cdb42bec 2560 for (i = 0; i < phba->cfg_hdw_queue; i++) {
6a828b0f
JS
2561 if (!phba->sli4_hba.hdwq[i].nvme_wq)
2562 continue;
cdb42bec 2563 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
c3725bdc 2564
d580c613
JS
2565 if (!pring)
2566 continue;
2567
c3725bdc
JS
2568 /* Retrieve everything on the txcmplq */
2569 while (!list_empty(&pring->txcmplq)) {
2570 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2571 wait_cnt++;
2572
2573			/* The sleep is 10 ms. Every ten seconds,
2574 * dump a message. Something is wrong.
2575 */
2576 if ((wait_cnt % 1000) == 0) {
2577 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2578 "6178 NVME IO not empty, "
2579 "cnt %d\n", wait_cnt);
2580 }
2581 }
2582 }
2583}
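
/* Illustrative stand-alone sketch (not driver code): the drain loop above
 * sleeps 10 ms per pass and logs every 1000 passes (roughly ten seconds)
 * while work remains. A userspace model of that poll-and-log cadence, with
 * a hypothetical pending() predicate:
 */
#include <stdio.h>
#include <unistd.h>

static int remaining = 3;

static int pending_model(void)
{
	return remaining-- > 0;
}

static void drain(int (*pending)(void))
{
	unsigned int wait_cnt = 0;

	while (pending()) {
		usleep(10 * 1000);	/* 10 ms per pass */
		wait_cnt++;
		if ((wait_cnt % 1000) == 0)
			fprintf(stderr, "still draining, cnt %u\n", wait_cnt);
	}
}

int main(void)
{
	drain(pending_model);
	return 0;
}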