scsi: lpfc: Add simple unlikely optimizations to reduce NVME latency
drivers/scsi/lpfc/lpfc_nvme.c (linux-2.6-block.git)
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
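
/*
 * Note: the templates above are consumed at IO submit time by
 * lpfc_nvme_prep_io_cmd(), which copies words 7-11 (iread/iwrite) or
 * words 4-11 (icmnd) into the live WQE so that only the per-IO
 * "variable" words need to be filled on the hot path.
 */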

/**
 * lpfc_nvme_create_queue - Create and bind an NVME IO queue handle
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Requested size of the queue (not used by this driver).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}
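	/*
	 * Example (hypothetical counts): with max_hw_queues == 4, IO
	 * qidx 1..4 map to hardware queue index 0..3 and qidx 5 wraps
	 * back to index 0; the admin queue (qidx 0) shares index 0.
	 */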

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "hdw_queue %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Free the handle from lpfc_nvme_create_queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
			 lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport %p delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall. NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport %p\n",
			 remoteport);
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall. Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}

static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
			 "lsreg:%p bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}
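	/*
	 * Note: first_len (the size of the first BDE, i.e. the LS request
	 * payload) is what gets programmed into WQE words 0-2 and word 3
	 * below; the accumulated xmit_len only feeds the trace message.
	 */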

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request to issue
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero - An error or WQE status code on failure
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	/* Need the ndlp. It is stored in the driver's rport. */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 Remoteport %p, rport has invalid ndlp. "
				 "Failing LS Req\n", pnvme_rport);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID,
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&lport->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
	 * This code allows it all to work.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		atomic_inc(&lport->xmt_ls_err);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport that issued the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request to abort
 *
 * Driver registers this routine to abort a link service request
 * previously issued from the nvme_fc transport to a remote nvme-aware
 * port.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS. The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME. NVME sends 96 bytes. Also, use the
	 * nvme command's command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;			/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}
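	/*
	 * Note on the two paths above: with cfg_nvme_embed_cmd the CMD IU
	 * payload rides inside WQE words 16-30 as an immediate BDE, which
	 * presumably spares the adapter a separate fetch of the command
	 * buffer; otherwise words 0-2 carry a BDE pointing at the
	 * transport's DMA-mapped command.
	 */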

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;
	uint64_t segsum;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives a IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
		seg1 = 0;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	segsum = seg2;
	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg4)
		return;
	seg4 -= segsum;

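	/*
	 * At this point seg2 + seg3 + seg4 equals the total time from
	 * ts_cmd_start to ts_data_nvme; the submit-to-WQ, WQ-to-ISR and
	 * ISR-to-NVME-layer latencies partition the life of the IO.
	 */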
	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the driver's hba instance data
 * @pwqeIn: Pointer to the command WQE that is completing
 * @wcqe: Pointer to the work-queue completion entry for the WQE
 *
 * Completion handler for NVME FCP commands. The routine translates the
 * WCQE status into the transport's view of the IO (rebuilding the ERSP
 * IU from the CQE when needed) and hands the request back to the NVME
 * layer.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NODE | LOG_NVME_IOERR,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
				 "nvmeCmd %p\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

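	/*
	 * Per the commit intent, the error-accounting branch below is
	 * marked unlikely() so that the common, successful completion
	 * takes the straight-line path and per-IO latency stays low.
	 */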
	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now let's build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it. All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd %p "
						 "nvme cmd %p, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
			/* fall through */
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO. There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
		uint32_t cpu;
		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			if (lpfc_ncmd->cpu != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6701 CPU Check cmpl: "
						 "cpu %d expect %d\n",
						 cpu, lpfc_ncmd->cpu);
			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
		}
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's IO buffer backing this command
 * @pnode: Pointer to the node that will receive the IO
 * @cstat: Pointer to the per-queue FC4 statistics to update
 *
 * This routine initializes the non-DMA fields of the command WQE from
 * the matching iread/iwrite/icmnd template and the nvme request, before
 * lpfc_nvme_prep_io_dma fills in the data SGL.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node is not active
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
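			/*
			 * First-burst example (hypothetical sizes): with a
			 * negotiated nvme_fb_size of 2048, a 1024-byte write
			 * gets initial_xfer_len = 1024, while an 8192-byte
			 * write is capped at 2048.
			 */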
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's IO buffer backing this command
 *
 * This routine fixes up the command and response SGEs and then formats
 * one data SGE per scatter-gather element supplied by the nvme
 * transport for the IO described by @lpfc_ncmd.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs. The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
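		/*
		 * Words 13-15 below duplicate the first data BDE when
		 * cfg_enable_pbde is set; the data SGEs formatted above
		 * remain in place either way. (The PBDE is a performance
		 * hint; disabling it only clears words 13-15 and wqe_pbde.)
		 */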
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler. This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno - Failure
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (unlikely(vport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Fail IO, ndlp not ready: rport %p "
				 "ndlp %p, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->upcall_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}
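	/*
	 * The expedite flag is passed through to lpfc_get_nvme_buf() so
	 * a keep-alive can still obtain an IO buffer when the normal
	 * pool is exhausted, keeping the association alive under
	 * resource pressure.
	 */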

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}
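	/*
	 * Two scheduling modes: BY_HDWQ trusts the transport's queue
	 * affinity (the qhandle index assigned at create_queue time),
	 * while the default maps the submitting CPU to its hardware
	 * queue via the driver's cpu_map.
	 */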

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever. There is no midlayer issuing
	 * an abort, so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now depends on the IO queue steering from
	 * the transport. We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			lpfc_ncmd->cpu = cpu;
			if (idx != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6702 CPU Check cmd: "
						 "cpu %d wq %d\n",
						 lpfc_ncmd->cpu,
						 lpfc_queue_info->index);
			phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
		}
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}
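
/*
 * Submit-path summary (illustrative only): the happy path is
 *
 *	lpfc_get_nvme_buf() -> prep cmd/dma -> lpfc_sli4_issue_wqe()
 *
 * and each failure point unwinds exactly what was built: a prep or
 * WQE-post failure backs out the per-queue request counters and returns
 * the buffer via lpfc_release_nvme_buf(), while -EBUSY is reserved for
 * resource exhaustion (qdepth or empty buffer pool) rather than a hard
 * error.
 */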

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to wcqe complete object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler. This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport. This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	union lpfc_wqe128 *abts_wqe;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport. If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6136 No available abort wqes. Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(*abts_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

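	/*
	 * Recap of the ABTS WQE just built (sketch, word numbers per the
	 * comments above): word 7 carries the ABORT_XRI_CX command and
	 * class, word 8 the XRI of the exchange being killed, word 9 the
	 * iotag that routes the completion back to abts_buf, and words
	 * 10-11 the QOSD/lenloc and command-type/CQ-select bits.
	 */
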
	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req = lpfc_nvme_ls_req,
	.fcp_io = lpfc_nvme_fcp_io_submit,
	.ls_abort = lpfc_nvme_ls_abort,
	.fcp_abort = lpfc_nvme_fcp_abort,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
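
/*
 * Usage sketch (see lpfc_nvme_create_localport() below): after
 * max_hw_queues and max_sgl_segments are tuned for the HBA, the
 * template is handed to the nvme-fc transport roughly as
 *
 *	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *					 &vport->phba->pcidev->dev,
 *					 &localport);
 *
 * and the transport then invokes .fcp_io/.fcp_abort and friends
 * against the returned localport.
 */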

/**
 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: The node the IO is destined for.
 * @idx: Index of the hardware queue to allocate from.
 * @expedite: Nonzero if the buffer may come from the expedite pool.
 *
 * This routine removes a nvme buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
c490850a 1952static struct lpfc_io_buf *
cf1a1d3e 1953lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
5e5b511d 1954 int idx, int expedite)
01649561 1955{
c490850a 1956 struct lpfc_io_buf *lpfc_ncmd;
5e5b511d 1957 struct lpfc_sli4_hdw_queue *qp;
0794d601 1958 struct sli4_sge *sgl;
01649561
JS
1959 struct lpfc_iocbq *pwqeq;
1960 union lpfc_wqe128 *wqe;
01649561 1961
c490850a 1962 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2a5b7d62 1963
0794d601 1964 if (lpfc_ncmd) {
01649561 1965 pwqeq = &(lpfc_ncmd->cur_iocbq);
205e8240 1966 wqe = &pwqeq->wqe;
01649561 1967
0794d601
JS
1968 /* Setup key fields in buffer that may have been changed
1969 * if other protocols used this buffer.
1970 */
1971 pwqeq->iocb_flag = LPFC_IO_NVME;
01649561 1972 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
0794d601
JS
1973 lpfc_ncmd->start_time = jiffies;
1974 lpfc_ncmd->flags = 0;
01649561
JS
1975
1976 /* Rsp SGE will be filled in when we rcv an IO
1977 * from the NVME Layer to be sent.
1978 * The cmd is going to be embedded so we need a SKIP SGE.
1979 */
0794d601 1980 sgl = lpfc_ncmd->dma_sgl;
01649561
JS
1981 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1982 bf_set(lpfc_sli4_sge_last, sgl, 0);
1983 sgl->word2 = cpu_to_le32(sgl->word2);
1984 /* Fill in word 3 / sgl_len during cmd submission */
1985
d9f492a1 1986 /* Initialize 64 bytes only */
5fd11085 1987 memset(wqe, 0, sizeof(union lpfc_wqe));
01649561 1988
0794d601
JS
1989 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1990 atomic_inc(&ndlp->cmd_pending);
c490850a 1991 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
0794d601 1992 }
01649561 1993
c490850a
JS
1994 } else {
1995 qp = &phba->sli4_hba.hdwq[idx];
5e5b511d 1996 qp->empty_io_bufs++;
01649561 1997 }
2a5b7d62 1998
01649561
JS
1999 return lpfc_ncmd;
2000}
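
/*
 * Caller sketch (mirrors lpfc_nvme_fcp_io_submit() above): the buffer
 * is checked out per hardware queue and must be handed back on any
 * failure after checkout, e.g.
 *
 *	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
 *	if (!lpfc_ncmd)
 *		return -EBUSY;
 *	...
 *	lpfc_release_nvme_buf(phba, lpfc_ncmd);	 (on error)
 */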

/**
 * lpfc_release_nvme_buf - Return a nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
 * lpfc_io_buf_list list. For SLI4, XRIs are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if the IO was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_nvme_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
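
/*
 * Deferred-release flow (sketch): a completion that arrives with the
 * XB bit set leaves LPFC_SBUF_XBUSY on the buffer, so the release
 * above parks it on qp->lpfc_abts_nvme_buf_list instead of the free
 * list; lpfc_sli4_nvme_xri_aborted() below clears the flag and does
 * the real free once the ABTS exchange for that XRI completes.
 */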

/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport. It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time. Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - no heap memory available
 *	other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance. The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on fcp_io_sched */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
		lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
	else
		lpfc_nvme_template.max_hw_queues =
			phba->sli4_hba.num_present_cpu;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */

	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev,
					 &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP %p, private %p, "
				 "sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);
		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}

#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources. Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
				if (!pring)
					continue;
				if (pring->txcmplq_cnt)
					pending += pring->txcmplq_cnt;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6176 Lport %p Localport %p wait "
					 "timed out. Pending %d. Renewing.\n",
					 lport, vport->localport, pending);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport %p Localport %p Complete Success\n",
			 lport, vport->localport);
}
#endif
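
/*
 * Skeleton of the wait-and-renew pattern above (sketch only):
 *
 *	while (!wait_for_completion_timeout(cmp, wait_tmo))
 *		;	(count pending WQEs, log, and keep waiting)
 *
 * wait_for_completion_timeout() returns 0 on timeout and the number
 * of remaining jiffies otherwise, so a zero return renews the wait.
 */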

/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to the lpfc vport whose localport is being destroyed.
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there. This routine ensures all rports bound to the
 * lport have been disconnected.
 *
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport %p\n",
			 localport);

	/* lport's rport list is clear. Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion. This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support. All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}

void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP %p, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport %p did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport %p to DID x%06x\n",
			 lport, localport->port_id);
#endif
}

int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance. Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults. The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
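
	/*
	 * Example (hypothetical node): an ndlp whose PRLI response set
	 * both NLP_NVME_TARGET and NLP_NVME_DISCOVERY registers with
	 * port_role == (FC_PORT_ROLE_NVME_TARGET |
	 *		 FC_PORT_ROLE_NVME_DISCOVERY), which is the bit the
	 * rescan logic in lpfc_nvme_rescan_port() later keys off of.
	 */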

	spin_lock_irq(&vport->phba->hbalock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&vport->phba->hbalock);
	} else {
		spin_unlock_irq(&vport->phba->hbalock);
		lpfc_nlp_get(ndlp);
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport. Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);
		rport = remote_port->private;
		if (oldrport) {
			/* New remoteport record does not guarantee valid
			 * host private memory area.
			 */
			if (oldrport == remote_port->private) {
				/* Same remoteport - ndlp should match.
				 * Just reuse.
				 */
				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
						 LOG_NVME_DISC,
						 "6014 Rebind lport to current "
						 "remoteport %p wwpn 0x%llx, "
						 "Data: x%x x%x %p %p x%x x%06x\n",
						 remote_port,
						 remote_port->port_name,
						 remote_port->port_id,
						 remote_port->port_role,
						 oldrport->ndlp,
						 ndlp,
						 ndlp->nlp_type,
						 ndlp->nlp_DID);

				/* It's a complete rebind only if the driver
				 * is registering with the same ndlp. Otherwise
				 * the driver likely executed a node swap
				 * prior to this registration and the ndlp to
				 * remoteport binding needs to be redone.
				 */
				if (prev_ndlp == ndlp)
					return 0;

			}

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&vport->phba->hbalock);
			ndlp->nrport = NULL;
			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&vport->phba->hbalock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
				    (!prev_ndlp->nrport))
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->nrport = rport;
		spin_unlock_irq(&vport->phba->hbalock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NVME_DISC | LOG_NODE,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}

/**
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 * @vport: pointer to the lpfc vport the remoteport lives on.
 * @ndlp: pointer to the node being checked.
 *
 * If the ndlp represents an NVME Target that we are logged into,
 * ping the NVME FC Transport layer to initiate a device rescan
 * on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&vport->phba->hbalock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Only rescan if we are an NVME target in the MAPPED state */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}

/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective. Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued. Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss. Node ref count
 * is only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is fundamental error. The localport is always
	 * available until driver unload. Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&vport->phba->hbalock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport %p, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type);

	/* Sanity check ndlp type. Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored. The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);
		if (ret != 0) {
			lpfc_nlp_put(ndlp);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
			 "6168 State error: lport %p, rport %p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into the hardware queue array whose abort list is searched.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &qp->lpfc_abts_nvme_buf_list, list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&lpfc_ncmd->list);
			qp->abts_nvme_io_bufs--;
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(&qp->abts_nvme_buf_list_lock);

			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp)
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6311 nvme_cmd %p xri x%x tag x%x "
					"abort complete and xri released\n",
					lpfc_ncmd->nvmeCmd, xri,
					lpfc_ncmd->cur_iocbq.iotag);

			/* Aborted NVME commands are required to not complete
			 * before the abort exchange command fully completes.
			 * Once completed, it is available via the put list.
			 */
			if (lpfc_ncmd->nvmeCmd) {
				nvme_cmd = lpfc_ncmd->nvmeCmd;
				nvme_cmd->done(nvme_cmd);
				lpfc_ncmd->nvmeCmd = NULL;
			}
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			return;
		}
	}
	spin_unlock(&qp->abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6312 XRI Aborted xri x%x not found\n", xri);
}
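
/*
 * Lookup sketch: the aborted-XRI list is per hardware queue, so the
 * search above is bounded by the IOs parked on that queue's
 * qp->lpfc_abts_nvme_buf_list; e.g. an ABTS completion for xri 0x1a3
 * on hdwq 2 (hypothetical values) only walks hdwq[2]'s list before
 * completing the command back to the transport via nvme_cmd->done().
 */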

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all NVME rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].nvme_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10mS. Every ten seconds,
			 * dump a message. Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}
}
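
/*
 * Timing check (sketch): LPFC_XRI_EXCH_BUSY_WAIT_T1 is the 10 ms sleep
 * noted above, so wait_cnt % 1000 == 0 fires once per
 * 1000 * 10 ms = 10 s of polling, matching the in-loop comment.
 */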

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_nvme_fcpreq_priv *freqpriv;

	if (!pwqeIn->context1) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	/* For abort iocb just return, IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;

	spin_lock(&lpfc_ncmd->buf_lock);
	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}

	nCmd = lpfc_ncmd->nvmeCmd;
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	nCmd->transferred_length = 0;
	nCmd->rcv_rsplen = 0;
	nCmd->status = NVME_SC_INTERNAL;
	freqpriv = nCmd->private;
	freqpriv->nvme_buf = NULL;
	lpfc_ncmd->nvmeCmd = NULL;

	spin_unlock(&lpfc_ncmd->buf_lock);
	nCmd->done(nCmd);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
#endif
}