1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
43 #include "lpfc_version.h"
47 #include "lpfc_sli4.h"
49 #include "lpfc_disc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 struct lpfc_nvmet_rcv_ctx *,
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 struct lpfc_nvmet_rcv_ctx *,
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 struct lpfc_nvmet_rcv_ctx *,
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 struct lpfc_nvmet_rcv_ctx *,
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 struct lpfc_nvmet_rcv_ctx *);
76 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
78 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
80 static union lpfc_wqe128 lpfc_tsend_cmd_template;
81 static union lpfc_wqe128 lpfc_treceive_cmd_template;
82 static union lpfc_wqe128 lpfc_trsp_cmd_template;
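/*
 * Note: the three templates above are filled in once, below, and are then
 * meant to be cloned per I/O by the prep routines (e.g.
 * lpfc_nvmet_prep_fcp_wqe() is expected to memcpy() the matching template
 * into the command WQE and then patch only the words marked "variable"
 * in the comments that follow).
 */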
84 /* Setup WQE templates for NVME IOs */
86 lpfc_nvmet_cmd_template(void)
88 union lpfc_wqe128 *wqe;
91 wqe = &lpfc_tsend_cmd_template;
92 memset(wqe, 0, sizeof(union lpfc_wqe128));
94 /* Word 0, 1, 2 - BDE is variable */
96 /* Word 3 - payload_offset_len is zero */
98 /* Word 4 - relative_offset is variable */
100 /* Word 5 - is zero */
102 /* Word 6 - ctxt_tag, xri_tag is variable */
104 /* Word 7 - wqe_ar is variable */
105 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
106 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
107 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
108 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
109 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
111 /* Word 8 - abort_tag is variable */
113 /* Word 9 - reqtag, rcvoxid is variable */
115 /* Word 10 - wqes, xc is variable */
116 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
117 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
118 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
119 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
120 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
121 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
123 /* Word 11 - sup, irsp, irsplen is variable */
124 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
125 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
126 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
127 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
128 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
129 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
131 /* Word 12 - fcp_data_len is variable */
133 /* Word 13, 14, 15 - PBDE is zero */
135 /* TRECEIVE template */
136 wqe = &lpfc_treceive_cmd_template;
137 memset(wqe, 0, sizeof(union lpfc_wqe128));
139 /* Word 0, 1, 2 - BDE is variable */
142 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
144 /* Word 4 - relative_offset is variable */
146 /* Word 5 - is zero */
148 /* Word 6 - ctxt_tag, xri_tag is variable */
151 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
152 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
153 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
154 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
155 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
157 /* Word 8 - abort_tag is variable */
159 /* Word 9 - reqtag, rcvoxid is variable */
161 /* Word 10 - xc is variable */
162 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
163 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
164 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
165 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
166 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
167 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
169 /* Word 11 - pbde is variable */
170 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
171 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
172 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
173 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
174 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
175 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
177 /* Word 12 - fcp_data_len is variable */
179 /* Word 13, 14, 15 - PBDE is variable */
182 wqe = &lpfc_trsp_cmd_template;
183 memset(wqe, 0, sizeof(union lpfc_wqe128));
185 /* Word 0, 1, 2 - BDE is variable */
187 /* Word 3 - response_len is variable */
189 /* Word 4, 5 - is zero */
191 /* Word 6 - ctxt_tag, xri_tag is variable */
194 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
195 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
196 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
197 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
198 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
200 /* Word 8 - abort_tag is variable */
202 /* Word 9 - reqtag is variable */
204 /* Word 10 wqes, xc is variable */
205 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
206 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
207 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
208 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
209 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
210 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
212 /* Word 11 irsp, irsplen is variable */
213 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
214 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
215 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
216 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
217 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
218 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
220 /* Word 12, 13, 14, 15 - is zero */
223 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
224 static struct lpfc_nvmet_rcv_ctx *
225 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
227 struct lpfc_nvmet_rcv_ctx *ctxp;
231 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
232 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
233 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
239 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
246 static struct lpfc_nvmet_rcv_ctx *
247 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
249 struct lpfc_nvmet_rcv_ctx *ctxp;
253 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
254 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
255 if (ctxp->oxid != oxid || ctxp->sid != sid)
261 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
270 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
272 lockdep_assert_held(&ctxp->ctxlock);
274 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
275 "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
276 ctxp->oxid, ctxp->flag);
278 if (ctxp->flag & LPFC_NVMET_CTX_RLS)
281 ctxp->flag |= LPFC_NVMET_CTX_RLS;
282 spin_lock(&phba->sli4_hba.t_active_list_lock);
283 list_del(&ctxp->list);
284 spin_unlock(&phba->sli4_hba.t_active_list_lock);
285 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
286 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
287 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
291 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
292 * @phba: Pointer to HBA context object.
293 * @cmdwqe: Pointer to driver command WQE object.
294 * @wcqe: Pointer to driver response CQE object.
296 * The function is called from SLI ring event handler with no
297 * lock held. This function is the completion handler for NVME LS commands.
298 * The function frees memory resources used for the NVME commands.
301 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
302 struct lpfc_wcqe_complete *wcqe)
304 struct lpfc_nvmet_tgtport *tgtp;
305 struct nvmefc_tgt_ls_req *rsp;
306 struct lpfc_nvmet_rcv_ctx *ctxp;
307 uint32_t status, result;
309 status = bf_get(lpfc_wcqe_c_status, wcqe);
310 result = wcqe->parameter;
311 ctxp = cmdwqe->context2;
313 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
314 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
315 "6410 NVMET LS cmpl state mismatch IO x%x: "
317 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
320 if (!phba->targetport)
323 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
327 atomic_inc(&tgtp->xmt_ls_rsp_error);
328 if (result == IOERR_ABORT_REQUESTED)
329 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
330 if (bf_get(lpfc_wcqe_c_xb, wcqe))
331 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
333 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
338 rsp = &ctxp->ctx.ls_req;
340 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
341 ctxp->oxid, status, result);
343 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
344 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
345 status, result, ctxp->oxid);
347 lpfc_nlp_put(cmdwqe->context1);
348 cmdwqe->context2 = NULL;
349 cmdwqe->context3 = NULL;
350 lpfc_sli_release_iocbq(phba, cmdwqe);
356 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
357 * @phba: HBA buffer is associated with
358 * @ctx_buf: nvmet context buffer to clean up and repost
359 *
361 * Description: Frees the DMA buffer held by the context, normally by
362 * reposting it to its associated RQ so it can be reused, then recycles the context.
364 * Notes: Takes the context and buffer list locks. Can be called with or without other locks held.
369 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
371 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
372 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
373 struct lpfc_nvmet_tgtport *tgtp;
374 struct fc_frame_header *fc_hdr;
375 struct rqb_dmabuf *nvmebuf;
376 struct lpfc_nvmet_ctx_info *infop;
377 uint32_t size, oxid, sid;
382 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
385 ctxp->txrdy_phys = 0;
388 if (ctxp->state == LPFC_NVMET_STE_FREE) {
389 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
390 "6411 NVMET free, already free IO x%x: %d %d\n",
391 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
394 if (ctxp->rqb_buffer) {
395 spin_lock_irqsave(&ctxp->ctxlock, iflag);
396 nvmebuf = ctxp->rqb_buffer;
397 /* check if freed in another path whilst acquiring lock */
399 ctxp->rqb_buffer = NULL;
400 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
401 ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
402 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
403 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
406 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
408 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
411 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
414 ctxp->state = LPFC_NVMET_STE_FREE;
416 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
417 if (phba->sli4_hba.nvmet_io_wait_cnt) {
418 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
419 nvmebuf, struct rqb_dmabuf,
421 phba->sli4_hba.nvmet_io_wait_cnt--;
422 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
425 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
426 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
427 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
428 size = nvmebuf->bytes_recv;
429 sid = sli4_sid_from_fc_hdr(fc_hdr);
431 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
439 ctxp->state = LPFC_NVMET_STE_RCV;
442 ctxp->ctxbuf = ctx_buf;
443 ctxp->rqb_buffer = (void *)nvmebuf;
444 spin_lock_init(&ctxp->ctxlock);
446 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
447 /* NOTE: isr time stamp is stale when context is re-assigned */
448 if (ctxp->ts_isr_cmd) {
449 ctxp->ts_cmd_nvme = 0;
450 ctxp->ts_nvme_data = 0;
451 ctxp->ts_data_wqput = 0;
452 ctxp->ts_isr_data = 0;
453 ctxp->ts_data_nvme = 0;
454 ctxp->ts_nvme_status = 0;
455 ctxp->ts_status_wqput = 0;
456 ctxp->ts_isr_status = 0;
457 ctxp->ts_status_nvme = 0;
460 atomic_inc(&tgtp->rcv_fcp_cmd_in);
462 /* Indicate that a replacement buffer has been posted */
463 spin_lock_irqsave(&ctxp->ctxlock, iflag);
464 ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
465 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
467 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
468 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
469 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
470 "6181 Unable to queue deferred work "
472 "FCP Drop IO [x%x x%x x%x]\n",
474 atomic_read(&tgtp->rcv_fcp_cmd_in),
475 atomic_read(&tgtp->rcv_fcp_cmd_out),
476 atomic_read(&tgtp->xmt_fcp_release));
478 spin_lock_irqsave(&ctxp->ctxlock, iflag);
479 lpfc_nvmet_defer_release(phba, ctxp);
480 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
481 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
485 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
488 * Use the CPU context list, from the MRQ the IO was received on
489 * (ctxp->idx), to save context structure.
491 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
492 list_del_init(&ctxp->list);
493 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
494 cpu = raw_smp_processor_id();
495 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
496 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
497 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
498 infop->nvmet_ctx_list_cnt++;
499 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
503 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
505 lpfc_nvmet_ktime(struct lpfc_hba *phba,
506 struct lpfc_nvmet_rcv_ctx *ctxp)
508 uint64_t seg1, seg2, seg3, seg4, seg5;
509 uint64_t seg6, seg7, seg8, seg9, seg10;
512 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
513 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
514 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
515 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
516 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
519 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
521 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
523 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
525 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
527 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
529 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
531 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
533 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
535 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
537 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
540 * Segment 1 - Time from FCP command received by MSI-X ISR
541 * to FCP command is passed to NVME Layer.
542 * Segment 2 - Time from FCP command payload handed
543 * off to NVME Layer to Driver receives a Command op
545 * Segment 3 - Time from Driver receives a Command op
546 * from NVME Layer to Command is put on WQ.
547 * Segment 4 - Time from Driver WQ put is done
548 * to MSI-X ISR for Command cmpl.
549 * Segment 5 - Time from MSI-X ISR for Command cmpl to
550 * Command cmpl is passed to NVME Layer.
551 * Segment 6 - Time from Command cmpl is passed to NVME
552 * Layer to Driver receives a RSP op from NVME Layer.
553 * Segment 7 - Time from Driver receives a RSP op from
554 * NVME Layer to WQ put is done on TRSP FCP Status.
555 * Segment 8 - Time from Driver WQ put is done on TRSP
556 * FCP Status to MSI-X ISR for TRSP cmpl.
557 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
558 * TRSP cmpl is passed to NVME Layer.
559 * Segment 10 - Time from FCP command received by
560 * MSI-X ISR to command is completed on wire.
561 * (Segments 1 thru 8) for READDATA / WRITEDATA
562 * (Segments 1 thru 4) for READDATA_RSP
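 *
 * Each ts_* field is a raw ktime_get_ns() stamp taken at the point named
 * above; the deltas computed below are accumulated into the per-segment
 * phba->ktime_segN total/min/max counters for later reporting.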
564 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
567 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
573 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
579 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
585 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
592 /* For auto rsp commands seg6 thru seg10 will be 0 */
593 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
594 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
600 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
606 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
612 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
618 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
620 seg10 = (ctxp->ts_isr_status -
623 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
629 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
632 phba->ktime_seg1_total += seg1;
633 if (seg1 < phba->ktime_seg1_min)
634 phba->ktime_seg1_min = seg1;
635 else if (seg1 > phba->ktime_seg1_max)
636 phba->ktime_seg1_max = seg1;
638 phba->ktime_seg2_total += seg2;
639 if (seg2 < phba->ktime_seg2_min)
640 phba->ktime_seg2_min = seg2;
641 else if (seg2 > phba->ktime_seg2_max)
642 phba->ktime_seg2_max = seg2;
644 phba->ktime_seg3_total += seg3;
645 if (seg3 < phba->ktime_seg3_min)
646 phba->ktime_seg3_min = seg3;
647 else if (seg3 > phba->ktime_seg3_max)
648 phba->ktime_seg3_max = seg3;
650 phba->ktime_seg4_total += seg4;
651 if (seg4 < phba->ktime_seg4_min)
652 phba->ktime_seg4_min = seg4;
653 else if (seg4 > phba->ktime_seg4_max)
654 phba->ktime_seg4_max = seg4;
656 phba->ktime_seg5_total += seg5;
657 if (seg5 < phba->ktime_seg5_min)
658 phba->ktime_seg5_min = seg5;
659 else if (seg5 > phba->ktime_seg5_max)
660 phba->ktime_seg5_max = seg5;
662 phba->ktime_data_samples++;
666 phba->ktime_seg6_total += seg6;
667 if (seg6 < phba->ktime_seg6_min)
668 phba->ktime_seg6_min = seg6;
669 else if (seg6 > phba->ktime_seg6_max)
670 phba->ktime_seg6_max = seg6;
672 phba->ktime_seg7_total += seg7;
673 if (seg7 < phba->ktime_seg7_min)
674 phba->ktime_seg7_min = seg7;
675 else if (seg7 > phba->ktime_seg7_max)
676 phba->ktime_seg7_max = seg7;
678 phba->ktime_seg8_total += seg8;
679 if (seg8 < phba->ktime_seg8_min)
680 phba->ktime_seg8_min = seg8;
681 else if (seg8 > phba->ktime_seg8_max)
682 phba->ktime_seg8_max = seg8;
684 phba->ktime_seg9_total += seg9;
685 if (seg9 < phba->ktime_seg9_min)
686 phba->ktime_seg9_min = seg9;
687 else if (seg9 > phba->ktime_seg9_max)
688 phba->ktime_seg9_max = seg9;
690 phba->ktime_seg10_total += seg10;
691 if (seg10 < phba->ktime_seg10_min)
692 phba->ktime_seg10_min = seg10;
693 else if (seg10 > phba->ktime_seg10_max)
694 phba->ktime_seg10_max = seg10;
695 phba->ktime_status_samples++;
700 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
701 * @phba: Pointer to HBA context object.
702 * @cmdwqe: Pointer to driver command WQE object.
703 * @wcqe: Pointer to driver response CQE object.
705 * The function is called from SLI ring event handler with no
706 * lock held. This function is the completion handler for NVME FCP commands.
707 * The function frees memory resources used for the NVME commands.
710 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
711 struct lpfc_wcqe_complete *wcqe)
713 struct lpfc_nvmet_tgtport *tgtp;
714 struct nvmefc_tgt_fcp_req *rsp;
715 struct lpfc_nvmet_rcv_ctx *ctxp;
716 uint32_t status, result, op, start_clean, logerr;
717 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
721 ctxp = cmdwqe->context2;
722 ctxp->flag &= ~LPFC_NVMET_IO_INP;
724 rsp = &ctxp->ctx.fcp_req;
727 status = bf_get(lpfc_wcqe_c_status, wcqe);
728 result = wcqe->parameter;
730 if (phba->targetport)
731 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
735 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
736 ctxp->oxid, op, status);
739 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
740 rsp->transferred_length = 0;
742 atomic_inc(&tgtp->xmt_fcp_rsp_error);
743 if (result == IOERR_ABORT_REQUESTED)
744 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
747 logerr = LOG_NVME_IOERR;
749 /* pick up SLI4 exchange busy condition */
750 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
751 ctxp->flag |= LPFC_NVMET_XBUSY;
752 logerr |= LOG_NVME_ABTS;
754 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
757 ctxp->flag &= ~LPFC_NVMET_XBUSY;
760 lpfc_printf_log(phba, KERN_INFO, logerr,
761 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
763 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
764 status, result, ctxp->flag);
767 rsp->fcp_error = NVME_SC_SUCCESS;
768 if (op == NVMET_FCOP_RSP)
769 rsp->transferred_length = rsp->rsplen;
771 rsp->transferred_length = rsp->transfer_length;
773 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
776 if ((op == NVMET_FCOP_READDATA_RSP) ||
777 (op == NVMET_FCOP_RSP)) {
779 ctxp->state = LPFC_NVMET_STE_DONE;
782 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
783 if (ctxp->ts_cmd_nvme) {
784 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
786 cmdwqe->isr_timestamp;
789 ctxp->ts_nvme_status =
791 ctxp->ts_status_wqput =
793 ctxp->ts_isr_status =
795 ctxp->ts_status_nvme =
798 ctxp->ts_isr_status =
799 cmdwqe->isr_timestamp;
800 ctxp->ts_status_nvme =
806 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
807 if (ctxp->ts_cmd_nvme)
808 lpfc_nvmet_ktime(phba, ctxp);
810 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
813 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
814 memset(((char *)cmdwqe) + start_clean, 0,
815 (sizeof(struct lpfc_iocbq) - start_clean));
816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
817 if (ctxp->ts_cmd_nvme) {
818 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
819 ctxp->ts_data_nvme = ktime_get_ns();
824 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
825 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
826 id = raw_smp_processor_id();
827 if (id < LPFC_CHECK_CPU_CNT) {
829 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
830 "6704 CPU Check cmdcmpl: "
831 "cpu %d expect %d\n",
833 phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
840 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
841 struct nvmefc_tgt_ls_req *rsp)
843 struct lpfc_nvmet_rcv_ctx *ctxp =
844 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
845 struct lpfc_hba *phba = ctxp->phba;
846 struct hbq_dmabuf *nvmebuf =
847 (struct hbq_dmabuf *)ctxp->rqb_buffer;
848 struct lpfc_iocbq *nvmewqeq;
849 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
850 struct lpfc_dmabuf dmabuf;
851 struct ulp_bde64 bpl;
854 if (phba->pport->load_flag & FC_UNLOADING)
857 if (phba->pport->load_flag & FC_UNLOADING)
860 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
861 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
863 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
864 (ctxp->entry_cnt != 1)) {
865 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
866 "6412 NVMET LS rsp state mismatch "
868 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
870 ctxp->state = LPFC_NVMET_STE_LS_RSP;
873 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
875 if (nvmewqeq == NULL) {
876 atomic_inc(&nvmep->xmt_ls_drop);
877 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
878 "6150 LS Drop IO x%x: Prep\n",
880 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
881 atomic_inc(&nvmep->xmt_ls_abort);
882 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
883 ctxp->sid, ctxp->oxid);
887 /* Save numBdes for bpl2sgl */
889 nvmewqeq->hba_wqidx = 0;
890 nvmewqeq->context3 = &dmabuf;
892 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
893 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
894 bpl.tus.f.bdeSize = rsp->rsplen;
895 bpl.tus.f.bdeFlags = 0;
896 bpl.tus.w = le32_to_cpu(bpl.tus.w);
898 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
899 nvmewqeq->iocb_cmpl = NULL;
900 nvmewqeq->context2 = ctxp;
902 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
903 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
905 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
906 if (rc == WQE_SUCCESS) {
908 * Okay to repost buffer here, but wait till cmpl
909 * before freeing ctxp and iocbq.
911 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
912 atomic_inc(&nvmep->xmt_ls_rsp);
915 /* Give back resources */
916 atomic_inc(&nvmep->xmt_ls_drop);
917 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
918 "6151 LS Drop IO x%x: Issue %d\n",
921 lpfc_nlp_put(nvmewqeq->context1);
923 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
924 atomic_inc(&nvmep->xmt_ls_abort);
925 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
930 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
931 struct nvmefc_tgt_fcp_req *rsp)
933 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
934 struct lpfc_nvmet_rcv_ctx *ctxp =
935 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
936 struct lpfc_hba *phba = ctxp->phba;
937 struct lpfc_queue *wq;
938 struct lpfc_iocbq *nvmewqeq;
939 struct lpfc_sli_ring *pring;
940 unsigned long iflags;
943 if (phba->pport->load_flag & FC_UNLOADING) {
948 if (phba->pport->load_flag & FC_UNLOADING) {
953 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
954 if (ctxp->ts_cmd_nvme) {
955 if (rsp->op == NVMET_FCOP_RSP)
956 ctxp->ts_nvme_status = ktime_get_ns();
958 ctxp->ts_nvme_data = ktime_get_ns();
961 /* Setup the hdw queue if not already set */
963 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
965 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
966 int id = raw_smp_processor_id();
967 if (id < LPFC_CHECK_CPU_CNT) {
968 if (rsp->hwqid != id)
969 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
970 "6705 CPU Check OP: "
971 "cpu %d expect %d\n",
973 phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
975 ctxp->cpu = id; /* Setup cpu for cmpl check */
980 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
981 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
982 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
983 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
984 "6102 IO oxid x%x aborted\n",
990 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
991 if (nvmewqeq == NULL) {
992 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
993 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
994 "6152 FCP Drop IO x%x: Prep\n",
1000 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1001 nvmewqeq->iocb_cmpl = NULL;
1002 nvmewqeq->context2 = ctxp;
1003 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
1004 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1006 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1007 ctxp->oxid, rsp->op, rsp->rsplen);
1009 ctxp->flag |= LPFC_NVMET_IO_INP;
1010 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1011 if (rc == WQE_SUCCESS) {
1012 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1013 if (!ctxp->ts_cmd_nvme)
1015 if (rsp->op == NVMET_FCOP_RSP)
1016 ctxp->ts_status_wqput = ktime_get_ns();
1018 ctxp->ts_data_wqput = ktime_get_ns();
1025 * WQ was full, so queue nvmewqeq to be sent after
1028 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
1029 wq = ctxp->hdwq->nvme_wq;
1031 spin_lock_irqsave(&pring->ring_lock, iflags);
1032 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1033 wq->q_flag |= HBA_NVMET_WQFULL;
1034 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1035 atomic_inc(&lpfc_nvmep->defer_wqfull);
1039 /* Give back resources */
1040 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1041 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1042 "6153 FCP Drop IO x%x: Issue: %d\n",
1045 ctxp->wqeq->hba_wqidx = 0;
1046 nvmewqeq->context2 = NULL;
1047 nvmewqeq->context3 = NULL;
1054 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1056 struct lpfc_nvmet_tgtport *tport = targetport->private;
1058 /* release any threads waiting for the unreg to complete */
1059 if (tport->phba->targetport)
1060 complete(tport->tport_unreg_cmp);
1064 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1065 struct nvmefc_tgt_fcp_req *req)
1067 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1068 struct lpfc_nvmet_rcv_ctx *ctxp =
1069 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1070 struct lpfc_hba *phba = ctxp->phba;
1071 struct lpfc_queue *wq;
1072 unsigned long flags;
1074 if (phba->pport->load_flag & FC_UNLOADING)
1077 if (phba->pport->load_flag & FC_UNLOADING)
1081 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1083 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1084 "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1085 ctxp->oxid, ctxp->flag, ctxp->state);
1087 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1088 ctxp->oxid, ctxp->flag, ctxp->state);
1090 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1092 spin_lock_irqsave(&ctxp->ctxlock, flags);
1094 /* Since iaab/iaar are NOT set, we need to check
1095 * if the firmware is in the process of aborting this IO
1097 if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
1098 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1101 ctxp->flag |= LPFC_NVMET_ABORT_OP;
1103 if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1104 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1105 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1107 wq = ctxp->hdwq->nvme_wq;
1108 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1111 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1113 /* A state of LPFC_NVMET_STE_RCV means we have just received
1114 * the NVME command and have not started processing it.
1115 * (by issuing any IO WQEs on this exchange yet)
1117 if (ctxp->state == LPFC_NVMET_STE_RCV)
1118 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1121 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1126 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1127 struct nvmefc_tgt_fcp_req *rsp)
1129 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1130 struct lpfc_nvmet_rcv_ctx *ctxp =
1131 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1132 struct lpfc_hba *phba = ctxp->phba;
1133 unsigned long flags;
1134 bool aborting = false;
1136 spin_lock_irqsave(&ctxp->ctxlock, flags);
1137 if (ctxp->flag & LPFC_NVMET_XBUSY)
1138 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1139 "6027 NVMET release with XBUSY flag x%x"
1141 ctxp->flag, ctxp->oxid);
1142 else if (ctxp->state != LPFC_NVMET_STE_DONE &&
1143 ctxp->state != LPFC_NVMET_STE_ABORT)
1144 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1145 "6413 NVMET release bad state %d %d oxid x%x\n",
1146 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1148 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1149 (ctxp->flag & LPFC_NVMET_XBUSY)) {
1151 /* let the abort path do the real release */
1152 lpfc_nvmet_defer_release(phba, ctxp);
1154 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1156 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1157 ctxp->state, aborting);
1159 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1160 ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
1165 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1169 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1170 struct nvmefc_tgt_fcp_req *rsp)
1172 struct lpfc_nvmet_tgtport *tgtp;
1173 struct lpfc_nvmet_rcv_ctx *ctxp =
1174 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1175 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1176 struct lpfc_hba *phba = ctxp->phba;
1177 unsigned long iflag;
1180 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1181 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1184 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1185 "6425 Defer rcv: no buffer oxid x%x: "
1187 ctxp->oxid, ctxp->flag, ctxp->state);
1191 tgtp = phba->targetport->private;
1193 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1195 /* Free the nvmebuf since a new buffer already replaced it */
1196 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1197 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1198 ctxp->rqb_buffer = NULL;
1199 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1203 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1205 struct lpfc_nvmet_tgtport *tgtp;
1206 struct lpfc_hba *phba;
1209 tgtp = tgtport->private;
1212 rc = lpfc_issue_els_rscn(phba->pport, 0);
1213 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1214 "6420 NVMET subsystem change: Notification %s\n",
1215 (rc) ? "Failed" : "Sent");
1218 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1219 .targetport_delete = lpfc_nvmet_targetport_delete,
1220 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1221 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1222 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1223 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1224 .defer_rcv = lpfc_nvmet_defer_rcv,
1225 .discovery_event = lpfc_nvmet_discovery_event,
1228 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1229 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1230 .dma_boundary = 0xFFFFFFFF,
1232 /* optional features */
1233 .target_features = 0,
1234 /* sizes of additional private data for data structures */
1235 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1239 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1240 struct lpfc_nvmet_ctx_info *infop)
1242 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1243 unsigned long flags;
1245 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1246 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1247 &infop->nvmet_ctx_list, list) {
1248 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1249 list_del_init(&ctx_buf->list);
1250 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1252 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1253 ctx_buf->sglq->state = SGL_FREED;
1254 ctx_buf->sglq->ndlp = NULL;
1256 spin_lock(&phba->sli4_hba.sgl_list_lock);
1257 list_add_tail(&ctx_buf->sglq->list,
1258 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1259 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1261 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1262 kfree(ctx_buf->context);
1264 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1268 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1270 struct lpfc_nvmet_ctx_info *infop;
1273 /* The first context list, MRQ 0 CPU 0 */
1274 infop = phba->sli4_hba.nvmet_ctx_info;
1278 /* Cycle through the entire CPU context list for every MRQ */
1279 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1280 for_each_present_cpu(j) {
1281 infop = lpfc_get_ctx_list(phba, j, i);
1282 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1285 kfree(phba->sli4_hba.nvmet_ctx_info);
1286 phba->sli4_hba.nvmet_ctx_info = NULL;
1290 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1292 struct lpfc_nvmet_ctxbuf *ctx_buf;
1293 struct lpfc_iocbq *nvmewqe;
1294 union lpfc_wqe128 *wqe;
1295 struct lpfc_nvmet_ctx_info *last_infop;
1296 struct lpfc_nvmet_ctx_info *infop;
1299 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1300 "6403 Allocate NVMET resources for %d XRIs\n",
1301 phba->sli4_hba.nvmet_xri_cnt);
1303 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1304 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1305 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1306 if (!phba->sli4_hba.nvmet_ctx_info) {
1307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1308 "6419 Failed allocate memory for "
1309 "nvmet context lists\n");
1314 * Assuming X CPUs in the system, and Y MRQs, allocate some
1315 * lpfc_nvmet_ctx_info structures as follows:
1317 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1318 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1320 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1322 * Each line represents a MRQ "silo" containing an entry for
1325 * MRQ X is initially assumed to be associated with CPU X, thus
1326 * contexts are initially distributed across all MRQs using
1327 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1328 * freed, they are freed to the MRQ silo based on the CPU number
1329 * of the IO completion. Thus a context that was allocated for MRQ A
1330 * whose IO completed on CPU B will be freed to cpuB/mrqA.
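 *
 * lpfc_get_ctx_list(phba, cpu, mrq) selects the lpfc_nvmet_ctx_info entry
 * for a given (cpu, mrq) pair out of the flat array allocated above
 * (num_possible_cpu * cfg_nvmet_mrq entries); conceptually something like
 * &nvmet_ctx_info[cpu * cfg_nvmet_mrq + mrq], though the exact arithmetic
 * lives in the macro definition.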
1332 for_each_possible_cpu(i) {
1333 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1334 infop = lpfc_get_ctx_list(phba, i, j);
1335 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1336 spin_lock_init(&infop->nvmet_ctx_list_lock);
1337 infop->nvmet_ctx_list_cnt = 0;
1342 * Setup the next CPU context info ptr for each MRQ.
1343 * MRQ 0 will cycle thru CPUs 0 - X separately from
1344 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1346 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1347 last_infop = lpfc_get_ctx_list(phba,
1348 cpumask_first(cpu_present_mask),
1350 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
1351 infop = lpfc_get_ctx_list(phba, i, j);
1352 infop->nvmet_ctx_next_cpu = last_infop;
1357 /* For all nvmet xris, allocate resources needed to process a
1358 * received command on a per xri basis.
1361 cpu = cpumask_first(cpu_present_mask);
1362 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1363 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1365 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1366 "6404 Ran out of memory for NVMET\n");
1370 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1372 if (!ctx_buf->context) {
1374 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1375 "6405 Ran out of NVMET "
1376 "context memory\n");
1379 ctx_buf->context->ctxbuf = ctx_buf;
1380 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1382 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1383 if (!ctx_buf->iocbq) {
1384 kfree(ctx_buf->context);
1386 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1387 "6406 Ran out of NVMET iocb/WQEs\n");
1390 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1391 nvmewqe = ctx_buf->iocbq;
1392 wqe = &nvmewqe->wqe;
1394 /* Initialize WQE */
1395 memset(wqe, 0, sizeof(union lpfc_wqe));
1397 ctx_buf->iocbq->context1 = NULL;
1398 spin_lock(&phba->sli4_hba.sgl_list_lock);
1399 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1400 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1401 if (!ctx_buf->sglq) {
1402 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1403 kfree(ctx_buf->context);
1405 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1406 "6407 Ran out of NVMET XRIs\n");
1409 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1412 * Add ctx to MRQidx context list. Our initial assumption
1413 * is MRQidx will be associated with CPUidx. This association
1414 * can change on the fly.
1416 infop = lpfc_get_ctx_list(phba, cpu, idx);
1417 spin_lock(&infop->nvmet_ctx_list_lock);
1418 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1419 infop->nvmet_ctx_list_cnt++;
1420 spin_unlock(&infop->nvmet_ctx_list_lock);
1422 /* Spread ctx structures evenly across all MRQs */
1424 if (idx >= phba->cfg_nvmet_mrq) {
1426 cpu = cpumask_first(cpu_present_mask);
1429 cpu = cpumask_next(cpu, cpu_present_mask);
1430 if (cpu == nr_cpu_ids)
1431 cpu = cpumask_first(cpu_present_mask);
1435 for_each_present_cpu(i) {
1436 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1437 infop = lpfc_get_ctx_list(phba, i, j);
1438 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1439 "6408 TOTAL NVMET ctx for CPU %d "
1440 "MRQ %d: cnt %d nextcpu %p\n",
1441 i, j, infop->nvmet_ctx_list_cnt,
1442 infop->nvmet_ctx_next_cpu);
1449 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1451 struct lpfc_vport *vport = phba->pport;
1452 struct lpfc_nvmet_tgtport *tgtp;
1453 struct nvmet_fc_port_info pinfo;
1456 if (phba->targetport)
1459 error = lpfc_nvmet_setup_io_context(phba);
1463 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1464 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1465 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1466 pinfo.port_id = vport->fc_myDID;
1468 /* We need to tell the transport layer + 1 because it takes page
1469 * alignment into account. When space for the SGL is allocated we
1470 * allocate + 3, one for cmd, one for rsp and one for this alignment
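 * (for example, with cfg_nvme_seg_cnt of 64 the transport is told it may
 * use 65 segments while the SGL itself was sized for 67 entries).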
1472 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1473 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1474 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1476 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1477 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1484 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1485 "6025 Cannot register NVME targetport x%x: "
1486 "portnm %llx nodenm %llx segs %d qs %d\n",
1488 pinfo.port_name, pinfo.node_name,
1489 lpfc_tgttemplate.max_sgl_segments,
1490 lpfc_tgttemplate.max_hw_queues);
1491 phba->targetport = NULL;
1492 phba->nvmet_support = 0;
1494 lpfc_nvmet_cleanup_io_context(phba);
1497 tgtp = (struct lpfc_nvmet_tgtport *)
1498 phba->targetport->private;
1501 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1502 "6026 Registered NVME "
1503 "targetport: %p, private %p "
1504 "portnm %llx nodenm %llx segs %d qs %d\n",
1505 phba->targetport, tgtp,
1506 pinfo.port_name, pinfo.node_name,
1507 lpfc_tgttemplate.max_sgl_segments,
1508 lpfc_tgttemplate.max_hw_queues);
1510 atomic_set(&tgtp->rcv_ls_req_in, 0);
1511 atomic_set(&tgtp->rcv_ls_req_out, 0);
1512 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1513 atomic_set(&tgtp->xmt_ls_abort, 0);
1514 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1515 atomic_set(&tgtp->xmt_ls_rsp, 0);
1516 atomic_set(&tgtp->xmt_ls_drop, 0);
1517 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1518 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1519 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1520 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1521 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1522 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1523 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1524 atomic_set(&tgtp->xmt_fcp_drop, 0);
1525 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1526 atomic_set(&tgtp->xmt_fcp_read, 0);
1527 atomic_set(&tgtp->xmt_fcp_write, 0);
1528 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1529 atomic_set(&tgtp->xmt_fcp_release, 0);
1530 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1531 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1532 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1533 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1534 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1535 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1536 atomic_set(&tgtp->xmt_fcp_abort, 0);
1537 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1538 atomic_set(&tgtp->xmt_abort_unsol, 0);
1539 atomic_set(&tgtp->xmt_abort_sol, 0);
1540 atomic_set(&tgtp->xmt_abort_rsp, 0);
1541 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1542 atomic_set(&tgtp->defer_ctx, 0);
1543 atomic_set(&tgtp->defer_fod, 0);
1544 atomic_set(&tgtp->defer_wqfull, 0);
1550 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1552 struct lpfc_vport *vport = phba->pport;
1554 if (!phba->targetport)
1557 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1558 "6007 Update NVMET port %p did x%x\n",
1559 phba->targetport, vport->fc_myDID);
1561 phba->targetport->port_id = vport->fc_myDID;
1566 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1567 * @phba: pointer to lpfc hba data structure.
1568 * @axri: pointer to the nvmet xri abort wcqe structure.
1570 * This routine is invoked by the worker thread to process a SLI4 fast-path
1571 * NVMET aborted xri.
1574 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1575 struct sli4_wcqe_xri_aborted *axri)
1577 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1578 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1579 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1580 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1581 struct lpfc_nvmet_tgtport *tgtp;
1582 struct nvmefc_tgt_fcp_req *req = NULL;
1583 struct lpfc_nodelist *ndlp;
1584 unsigned long iflag = 0;
1586 bool released = false;
1588 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1589 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1591 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1594 if (phba->targetport) {
1595 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1596 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1599 spin_lock_irqsave(&phba->hbalock, iflag);
1600 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1601 list_for_each_entry_safe(ctxp, next_ctxp,
1602 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1604 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1607 spin_lock(&ctxp->ctxlock);
1608 /* Check if we already received a free context call
1609 * and we have completed processing an abort situation.
1611 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1612 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1613 list_del_init(&ctxp->list);
1616 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1617 spin_unlock(&ctxp->ctxlock);
1618 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1620 rrq_empty = list_empty(&phba->active_rrq_list);
1621 spin_unlock_irqrestore(&phba->hbalock, iflag);
1622 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1623 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1624 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1625 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1626 lpfc_set_rrq_active(phba, ndlp,
1627 ctxp->ctxbuf->sglq->sli4_lxritag,
1629 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1632 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1633 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1634 ctxp->oxid, ctxp->flag, released);
1636 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1639 lpfc_worker_wake_up(phba);
1642 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1643 spin_unlock_irqrestore(&phba->hbalock, iflag);
1645 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1648 * Abort already done by FW, so BA_ACC sent.
1649 * However, the transport may be unaware.
1651 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1652 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1653 "flag x%x oxid x%x rxid x%x\n",
1654 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1657 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1658 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1659 ctxp->state = LPFC_NVMET_STE_ABORT;
1660 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1662 lpfc_nvmeio_data(phba,
1663 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1664 xri, raw_smp_processor_id(), 0);
1666 req = &ctxp->ctx.fcp_req;
1668 nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1674 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1675 struct fc_frame_header *fc_hdr)
1677 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1678 struct lpfc_hba *phba = vport->phba;
1679 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1680 struct nvmefc_tgt_fcp_req *rsp;
1683 unsigned long iflag = 0;
1685 sid = sli4_sid_from_fc_hdr(fc_hdr);
1686 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1688 spin_lock_irqsave(&phba->hbalock, iflag);
1689 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1690 list_for_each_entry_safe(ctxp, next_ctxp,
1691 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1693 if (ctxp->oxid != oxid || ctxp->sid != sid)
1696 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1698 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1699 spin_unlock_irqrestore(&phba->hbalock, iflag);
1701 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1702 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1703 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1705 lpfc_nvmeio_data(phba,
1706 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1707 xri, raw_smp_processor_id(), 0);
1709 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1710 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1712 rsp = &ctxp->ctx.fcp_req;
1713 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1715 /* Respond with BA_ACC accordingly */
1716 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1719 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1720 spin_unlock_irqrestore(&phba->hbalock, iflag);
1722 /* check the wait list */
1723 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1724 struct rqb_dmabuf *nvmebuf;
1725 struct fc_frame_header *fc_hdr_tmp;
1730 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1732 /* match by oxid and s_id */
1733 list_for_each_entry(nvmebuf,
1734 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1736 fc_hdr_tmp = (struct fc_frame_header *)
1737 (nvmebuf->hbuf.virt);
1738 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1739 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1740 if (oxid_tmp != oxid || sid_tmp != sid)
1743 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1744 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1745 "is waiting for a ctxp\n",
1748 list_del_init(&nvmebuf->hbuf.list);
1749 phba->sli4_hba.nvmet_io_wait_cnt--;
1753 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1756 /* free buffer since already posted a new DMA buffer to RQ */
1758 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1759 /* Respond with BA_ACC accordingly */
1760 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1765 /* check active list */
1766 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1768 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1770 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1771 ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
1772 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1774 lpfc_nvmeio_data(phba,
1775 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1776 xri, raw_smp_processor_id(), 0);
1778 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1779 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1780 "flag x%x state x%x\n",
1781 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1783 if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
1784 /* Notify the transport */
1785 nvmet_fc_rcv_fcp_abort(phba->targetport,
1786 &ctxp->ctx.fcp_req);
1788 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1789 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1790 lpfc_nvmet_defer_release(phba, ctxp);
1791 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1793 if (ctxp->state == LPFC_NVMET_STE_RCV)
1794 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1797 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1800 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1804 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1805 oxid, raw_smp_processor_id(), 1);
1807 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1808 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1810 /* Respond with BA_RJT accordingly */
1811 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1817 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1818 struct lpfc_nvmet_rcv_ctx *ctxp)
1820 struct lpfc_sli_ring *pring;
1821 struct lpfc_iocbq *nvmewqeq;
1822 struct lpfc_iocbq *next_nvmewqeq;
1823 unsigned long iflags;
1824 struct lpfc_wcqe_complete wcqe;
1825 struct lpfc_wcqe_complete *wcqep;
1830 /* Fake an ABORT error code back to cmpl routine */
1831 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1832 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1833 wcqep->parameter = IOERR_ABORT_REQUESTED;
1835 spin_lock_irqsave(&pring->ring_lock, iflags);
1836 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1837 &wq->wqfull_list, list) {
1839 /* Checking for a specific IO to flush */
1840 if (nvmewqeq->context2 == ctxp) {
1841 list_del(&nvmewqeq->list);
1842 spin_unlock_irqrestore(&pring->ring_lock,
1844 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1851 list_del(&nvmewqeq->list);
1852 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1853 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1854 spin_lock_irqsave(&pring->ring_lock, iflags);
1858 wq->q_flag &= ~HBA_NVMET_WQFULL;
1859 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1863 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1864 struct lpfc_queue *wq)
1866 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1867 struct lpfc_sli_ring *pring;
1868 struct lpfc_iocbq *nvmewqeq;
1869 struct lpfc_nvmet_rcv_ctx *ctxp;
1870 unsigned long iflags;
1874 * Some WQE slots are available, so try to re-issue anything
1875 * on the WQ wqfull_list.
1878 spin_lock_irqsave(&pring->ring_lock, iflags);
1879 while (!list_empty(&wq->wqfull_list)) {
1880 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1882 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1883 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1884 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1885 spin_lock_irqsave(&pring->ring_lock, iflags);
1887 /* WQ was full again, so put it back on the list */
1888 list_add(&nvmewqeq->list, &wq->wqfull_list);
1889 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1892 if (rc == WQE_SUCCESS) {
1893 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1894 if (ctxp->ts_cmd_nvme) {
1895 if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
1896 ctxp->ts_status_wqput = ktime_get_ns();
1898 ctxp->ts_data_wqput = ktime_get_ns();
1905 wq->q_flag &= ~HBA_NVMET_WQFULL;
1906 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1912 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1914 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1915 struct lpfc_nvmet_tgtport *tgtp;
1916 struct lpfc_queue *wq;
1918 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1920 if (phba->nvmet_support == 0)
1922 if (phba->targetport) {
1923 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1924 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1925 wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1926 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1928 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1929 nvmet_fc_unregister_targetport(phba->targetport);
1930 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1931 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1932 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1933 "6179 Unreg targetport %p timeout "
1934 "reached.\n", phba->targetport);
1935 lpfc_nvmet_cleanup_io_context(phba);
1937 phba->targetport = NULL;
1942 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1943 * @phba: pointer to lpfc hba data structure.
1944 * @pring: pointer to a SLI ring.
1945 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1947 * This routine processes an unsolicited NVME LS request received in the
1948 * given buffer. It allocates a receive context for the exchange and hands
1949 * the LS payload to the NVMET transport via nvmet_fc_rcv_ls_req(); the
1950 * transport's response is later sent through lpfc_nvmet_xmt_ls_rsp(). If
1951 * the context cannot be allocated or the transport rejects the request,
1952 * the buffer is freed and, where needed, the exchange is aborted.
1955 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1956 struct hbq_dmabuf *nvmebuf)
1958 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1959 struct lpfc_nvmet_tgtport *tgtp;
1960 struct fc_frame_header *fc_hdr;
1961 struct lpfc_nvmet_rcv_ctx *ctxp;
1963 uint32_t size, oxid, sid, rc;
1965 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1966 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1968 if (!phba->targetport) {
1969 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1970 "6154 LS Drop IO x%x\n", oxid);
1978 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1979 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1980 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1981 sid = sli4_sid_from_fc_hdr(fc_hdr);
1983 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1985 atomic_inc(&tgtp->rcv_ls_req_drop);
1986 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1987 "6155 LS Drop IO x%x: Alloc\n",
1990 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1991 "xri x%x sz %d from %06x\n",
1993 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2001 ctxp->state = LPFC_NVMET_STE_LS_RCV;
2002 ctxp->entry_cnt = 1;
2003 ctxp->rqb_buffer = (void *)nvmebuf;
2004 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
2006 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
2009 * The calling sequence should be:
2010 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
2011 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
2013 atomic_inc(&tgtp->rcv_ls_req_in);
2014 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
2017 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2018 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2019 "%08x %08x %08x\n", size, rc,
2020 *payload, *(payload+1), *(payload+2),
2021 *(payload+3), *(payload+4), *(payload+5));
2024 atomic_inc(&tgtp->rcv_ls_req_out);
2028 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
2031 atomic_inc(&tgtp->rcv_ls_req_drop);
2032 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2033 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
2036 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
2037 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2039 atomic_inc(&tgtp->xmt_ls_abort);
2040 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2045 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2047 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2048 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
2049 struct lpfc_hba *phba = ctxp->phba;
2050 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2051 struct lpfc_nvmet_tgtport *tgtp;
2052 uint32_t *payload, qno;
2054 unsigned long iflags;
2057 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2058 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2059 "oxid: x%x flg: x%x state: x%x\n",
2060 ctxp->oxid, ctxp->flag, ctxp->state);
2061 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2062 lpfc_nvmet_defer_release(phba, ctxp);
2063 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2064 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2069 if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
2070 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2071 "6324 IO oxid x%x aborted\n",
2076 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2077 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2078 ctxp->flag |= LPFC_NVMET_TNOTIFY;
2079 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2080 if (ctxp->ts_isr_cmd)
2081 ctxp->ts_cmd_nvme = ktime_get_ns();
2084 * The calling sequence should be:
2085 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2086 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2087 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2088 * the NVME command / FC header is stored.
2089 * A buffer has already been reposted for this IO, so just free
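 *
 * Return handling summary (mirrors the code below): rc == 0 means the
 * command was accepted, so the receive buffer is reposted unless the
 * context is being reused or was already swapped; rc == -EOVERFLOW
 * means the transport is busy, so a replacement RQ buffer is posted
 * and the original buffer is kept until the .defer_rcv callback; any
 * other rc drops the command and issues an ABTS.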
2092 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2093 payload, ctxp->size);
2094 /* Process FCP command */
2096 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2097 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2098 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
2099 (nvmebuf != ctxp->rqb_buffer)) {
2100 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2103 ctxp->rqb_buffer = NULL;
2104 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2105 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2109 /* Processing of FCP command is deferred */
2110 if (rc == -EOVERFLOW) {
2111 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2113 ctxp->oxid, ctxp->size, ctxp->sid);
2114 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2115 atomic_inc(&tgtp->defer_fod);
2116 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2117 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
2118 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2121 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2123 * Post a replacement DMA buffer to RQ and defer
2124 * freeing rcv buffer till .defer_rcv callback
2127 lpfc_post_rq_buffer(
2128 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2129 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2132 ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
2133 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2134 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2135 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2137 atomic_read(&tgtp->rcv_fcp_cmd_in),
2138 atomic_read(&tgtp->rcv_fcp_cmd_out),
2139 atomic_read(&tgtp->xmt_fcp_release));
2140 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2141 ctxp->oxid, ctxp->size, ctxp->sid);
2142 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2143 lpfc_nvmet_defer_release(phba, ctxp);
2144 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2145 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2150 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2152 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2153 struct lpfc_nvmet_ctxbuf *ctx_buf =
2154 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2156 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2160 static struct lpfc_nvmet_ctxbuf *
2161 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2162 struct lpfc_nvmet_ctx_info *current_infop)
2164 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2165 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2166 struct lpfc_nvmet_ctx_info *get_infop;
2170 * The current_infop for the MRQ an NVME command IU was received
2171 * on is empty. Our goal is to replenish this MRQ's context
2172 * list from another CPU's list.
2174 * First we need to pick a context list to start looking on.
2175 * nvmet_ctx_start_cpu is the CPU that had contexts available the
2176 * last time we needed to replenish this CPU, while
2177 * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
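 *
 * Illustrative sketch (not driver code) of the scan below; "cur" stands
 * for current_infop, "get" for get_infop, and steal_list_and_pop_one()
 * is a stand-in for the list_splice_init() + list_remove_head() done
 * inline:
 *
 *	get = start ? start : next;
 *	for (i = 0; i < num_possible_cpu; i++) {
 *		if (get == cur) {
 *			get = get->nvmet_ctx_next_cpu;
 *			continue;
 *		}
 *		if (get->nvmet_ctx_list_cnt)
 *			return steal_list_and_pop_one(get, cur);
 *		get = get->nvmet_ctx_next_cpu;
 *	}
 *	return NULL;	/* all contexts for this MRQ are in flight */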
2179 if (current_infop->nvmet_ctx_start_cpu)
2180 get_infop = current_infop->nvmet_ctx_start_cpu;
2182 get_infop = current_infop->nvmet_ctx_next_cpu;
2184 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2185 if (get_infop == current_infop) {
2186 get_infop = get_infop->nvmet_ctx_next_cpu;
2189 spin_lock(&get_infop->nvmet_ctx_list_lock);
2191 /* Just take the entire context list, if there are any */
2192 if (get_infop->nvmet_ctx_list_cnt) {
2193 list_splice_init(&get_infop->nvmet_ctx_list,
2194 &current_infop->nvmet_ctx_list);
2195 current_infop->nvmet_ctx_list_cnt =
2196 get_infop->nvmet_ctx_list_cnt - 1;
2197 get_infop->nvmet_ctx_list_cnt = 0;
2198 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2200 current_infop->nvmet_ctx_start_cpu = get_infop;
2201 list_remove_head(&current_infop->nvmet_ctx_list,
2202 ctx_buf, struct lpfc_nvmet_ctxbuf,
2207 /* Otherwise, move on to the next CPU for this MRQ */
2208 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2209 get_infop = get_infop->nvmet_ctx_next_cpu;
2213 /* Nothing found, all contexts for the MRQ are in-flight */
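/*
 * Note: when this returns NULL the caller (lpfc_nvmet_unsol_fcp_buffer)
 * queues the received buffer on lpfc_nvmet_io_wait_list and posts a
 * brand new RQ buffer in its place, so the IO can be processed later
 * once a context is released (handled elsewhere in this file).
 */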
2218 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2219 * @phba: pointer to lpfc hba data structure.
2220 * @idx: relative index of MRQ vector
2221 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2222 * @isr_timestamp: in jiffies.
2223 * @cqflag: cq processing information regarding workload.
2225 * This routine processes an unsolicited NVME FCP command received in the
2226 * @nvmebuf RQ buffer. It obtains a receive context from the per-CPU
2227 * context list for this MRQ (replenishing the list from another CPU, or
2228 * deferring the IO, if none is available), initializes the context, and
2229 * passes the command to the NVME target transport either directly or
2230 * from a deferred work item, depending on CQ processing load.
2233 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2235 struct rqb_dmabuf *nvmebuf,
2236 uint64_t isr_timestamp,
2239 struct lpfc_nvmet_rcv_ctx *ctxp;
2240 struct lpfc_nvmet_tgtport *tgtp;
2241 struct fc_frame_header *fc_hdr;
2242 struct lpfc_nvmet_ctxbuf *ctx_buf;
2243 struct lpfc_nvmet_ctx_info *current_infop;
2244 uint32_t size, oxid, sid, qno;
2245 unsigned long iflag;
2248 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2252 if (!nvmebuf || !phba->targetport) {
2253 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2254 "6157 NVMET FCP Drop IO\n");
2256 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2261 * Get a pointer to the context list for this MRQ based on
2262 * the CPU this MRQ IRQ is associated with. If the CPU association
2263 * changes from our initial assumption, the context list could
2264 * be empty, thus it would need to be replenished with the
2265 * context list from another CPU for this MRQ.
2267 current_cpu = raw_smp_processor_id();
2268 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2269 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2270 if (current_infop->nvmet_ctx_list_cnt) {
2271 list_remove_head(&current_infop->nvmet_ctx_list,
2272 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2273 current_infop->nvmet_ctx_list_cnt--;
2275 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2277 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2279 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2280 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2281 size = nvmebuf->bytes_recv;
2283 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2284 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2285 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2286 if (idx != current_cpu)
2287 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2288 "6703 CPU Check rcv: "
2289 "cpu %d expect %d\n",
2291 phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2296 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2297 oxid, size, raw_smp_processor_id());
2299 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2302 /* Queue this NVME IO to process later */
2303 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2304 list_add_tail(&nvmebuf->hbuf.list,
2305 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2306 phba->sli4_hba.nvmet_io_wait_cnt++;
2307 phba->sli4_hba.nvmet_io_wait_total++;
2308 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2311 /* Post a brand new DMA buffer to RQ */
2313 lpfc_post_rq_buffer(
2314 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2315 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2317 atomic_inc(&tgtp->defer_ctx);
2321 sid = sli4_sid_from_fc_hdr(fc_hdr);
2323 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2324 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2325 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2326 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2327 if (ctxp->state != LPFC_NVMET_STE_FREE) {
2328 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2329 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2330 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2340 ctxp->state = LPFC_NVMET_STE_RCV;
2341 ctxp->entry_cnt = 1;
2343 ctxp->ctxbuf = ctx_buf;
2344 ctxp->rqb_buffer = (void *)nvmebuf;
2346 spin_lock_init(&ctxp->ctxlock);
2348 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2350 ctxp->ts_isr_cmd = isr_timestamp;
2351 ctxp->ts_cmd_nvme = 0;
2352 ctxp->ts_nvme_data = 0;
2353 ctxp->ts_data_wqput = 0;
2354 ctxp->ts_isr_data = 0;
2355 ctxp->ts_data_nvme = 0;
2356 ctxp->ts_nvme_status = 0;
2357 ctxp->ts_status_wqput = 0;
2358 ctxp->ts_isr_status = 0;
2359 ctxp->ts_status_nvme = 0;
2362 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2363 /* check for cq processing load */
2365 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2369 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2370 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2371 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2372 "6325 Unable to queue work for oxid x%x. "
2373 "FCP Drop IO [x%x x%x x%x]\n",
2375 atomic_read(&tgtp->rcv_fcp_cmd_in),
2376 atomic_read(&tgtp->rcv_fcp_cmd_out),
2377 atomic_read(&tgtp->xmt_fcp_release));
2379 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2380 lpfc_nvmet_defer_release(phba, ctxp);
2381 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2382 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
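
/*
 * Deferred receive path, in brief. This is an illustrative sketch, not
 * driver code; the INIT_WORK() of ctx_buf->defer_work is assumed to be
 * done where the context buffers are allocated (elsewhere in this file):
 *
 *	INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
 *	...
 *	queue_work(phba->wq, &ctx_buf->defer_work);
 *	  -> lpfc_nvmet_fcp_rqst_defer_work(work)
 *	  -> lpfc_nvmet_process_rcv_fcp_req(ctx_buf)
 */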
2387 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2388 * @phba: pointer to lpfc hba data structure.
2389 * @pring: pointer to a SLI ring.
2390 * @nvmebuf: pointer to received nvme data structure.
2392 * This routine is used to process an unsolicited event received from a SLI
2393 * (Service Level Interface) ring. The actual processing of the data buffer
2394 * associated with the unsolicited event is done by invoking the routine
2395 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2396 * SLI RQ on which the unsolicited event was received.
2399 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2400 struct lpfc_iocbq *piocb)
2402 struct lpfc_dmabuf *d_buf;
2403 struct hbq_dmabuf *nvmebuf;
2405 d_buf = piocb->context2;
2406 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2408 if (phba->nvmet_support == 0) {
2409 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2412 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2416 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2417 * @phba: pointer to lpfc hba data structure.
2418 * @idx: relative index of MRQ vector
2419 * @nvmebuf: pointer to received nvme data structure.
2420 * @isr_timestamp: in jiffies.
2421 * @cqflag: cq processing information regarding workload.
2423 * This routine is used to process an unsolicited event received from a SLI
2424 * (Service Level Interface) ring. The actual processing of the data buffer
2425 * associated with the unsolicited event is done by invoking the routine
2426 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2427 * SLI RQ on which the unsolicited event was received.
2430 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2432 struct rqb_dmabuf *nvmebuf,
2433 uint64_t isr_timestamp,
2436 if (phba->nvmet_support == 0) {
2437 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2440 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2444 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2445 * @phba: pointer to a host N_Port data structure.
2446 * @ctxp: Context info for NVME LS Request
2447 * @rspbuf: DMA buffer of NVME command.
2448 * @rspsize: size of the NVME command.
2450 * This routine allocates an lpfc WQE data structure (iocbq) from the
2451 * driver free-list and prepares it to transmit the NVME LS response
2452 * described by @rspbuf and @rspsize. It looks up the node (ndlp) for the
2453 * LS request's source DID, fills in the Buffer Descriptor Entry (BDE)
2454 * for the response payload, and builds an XMIT_SEQUENCE64 WQE addressed
2455 * to the received exchange (OX_ID). The reference count on the ndlp is
2456 * incremented by 1 and the ndlp reference is placed in context1 of the
2457 * WQE data structure so that the command's completion handler can access
2458 * it later.
2465 * Pointer to the newly allocated/prepared nvme wqe data structure
2466 * NULL - when nvme wqe data structure allocation/preparation failed
2468 static struct lpfc_iocbq *
2469 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2470 struct lpfc_nvmet_rcv_ctx *ctxp,
2471 dma_addr_t rspbuf, uint16_t rspsize)
2473 struct lpfc_nodelist *ndlp;
2474 struct lpfc_iocbq *nvmewqe;
2475 union lpfc_wqe128 *wqe;
2477 if (!lpfc_is_link_up(phba)) {
2478 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2479 "6104 NVMET prep LS wqe: link err: "
2480 "NPORT x%x oxid:x%x ste %d\n",
2481 ctxp->sid, ctxp->oxid, ctxp->state);
2485 /* Allocate buffer for command wqe */
2486 nvmewqe = lpfc_sli_get_iocbq(phba);
2487 if (nvmewqe == NULL) {
2488 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2489 "6105 NVMET prep LS wqe: No WQE: "
2490 "NPORT x%x oxid x%x ste %d\n",
2491 ctxp->sid, ctxp->oxid, ctxp->state);
2495 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2496 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2497 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2498 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2499 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2500 "6106 NVMET prep LS wqe: No ndlp: "
2501 "NPORT x%x oxid x%x ste %d\n",
2502 ctxp->sid, ctxp->oxid, ctxp->state);
2503 goto nvme_wqe_free_wqeq_exit;
2505 ctxp->wqeq = nvmewqe;
2507 /* prevent preparing wqe with NULL ndlp reference */
2508 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2509 if (nvmewqe->context1 == NULL)
2510 goto nvme_wqe_free_wqeq_exit;
2511 nvmewqe->context2 = ctxp;
2513 wqe = &nvmewqe->wqe;
2514 memset(wqe, 0, sizeof(union lpfc_wqe));
2517 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2518 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2519 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2520 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2527 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2528 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2529 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2530 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2531 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2534 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2535 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2536 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2539 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2540 CMD_XMIT_SEQUENCE64_WQE);
2541 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2542 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2543 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2546 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2549 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2550 /* Needs to be set by caller */
2551 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2554 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2555 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2556 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2557 LPFC_WQE_LENLOC_WORD12);
2558 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2561 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2562 LPFC_WQE_CQ_ID_DEFAULT);
2563 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2567 wqe->xmit_sequence.xmit_len = rspsize;
2570 nvmewqe->vport = phba->pport;
2571 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2572 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2574 /* Xmit NVMET response to remote NPORT <did> */
2575 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2576 "6039 Xmit NVMET LS response to remote "
2577 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2578 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2582 nvme_wqe_free_wqeq_exit:
2583 nvmewqe->context2 = NULL;
2584 nvmewqe->context3 = NULL;
2585 lpfc_sli_release_iocbq(phba, nvmewqe);
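
/*
 * Summary of the XMIT_SEQUENCE64 WQE built above for an LS response
 * (roughly, as derived from the code in lpfc_nvmet_prep_ls_wqe): words
 * 0-2 carry a single 64-bit BDE for the response payload, word 5 sets
 * R_CTL/TYPE to FC_RCTL_ELS4_REP/FC_TYPE_NVME, word 6 holds the RPI and
 * XRI tags, word 9 carries the request tag and the received OX_ID, and
 * word 12 holds the transmit length (rspsize).
 */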
2590 static struct lpfc_iocbq *
2591 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2592 struct lpfc_nvmet_rcv_ctx *ctxp)
2594 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2595 struct lpfc_nvmet_tgtport *tgtp;
2596 struct sli4_sge *sgl;
2597 struct lpfc_nodelist *ndlp;
2598 struct lpfc_iocbq *nvmewqe;
2599 struct scatterlist *sgel;
2600 union lpfc_wqe128 *wqe;
2601 struct ulp_bde64 *bde;
2603 dma_addr_t physaddr;
2608 if (!lpfc_is_link_up(phba)) {
2609 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2610 "6107 NVMET prep FCP wqe: link err:"
2611 "NPORT x%x oxid x%x ste %d\n",
2612 ctxp->sid, ctxp->oxid, ctxp->state);
2616 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2617 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2618 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2619 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2620 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2621 "6108 NVMET prep FCP wqe: no ndlp: "
2622 "NPORT x%x oxid x%x ste %d\n",
2623 ctxp->sid, ctxp->oxid, ctxp->state);
2627 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2628 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2629 "6109 NVMET prep FCP wqe: seg cnt err: "
2630 "NPORT x%x oxid x%x ste %d cnt %d\n",
2631 ctxp->sid, ctxp->oxid, ctxp->state,
2632 phba->cfg_nvme_seg_cnt);
2636 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2637 nvmewqe = ctxp->wqeq;
2638 if (nvmewqe == NULL) {
2639 /* Allocate buffer for command wqe */
2640 nvmewqe = ctxp->ctxbuf->iocbq;
2641 if (nvmewqe == NULL) {
2642 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2643 "6110 NVMET prep FCP wqe: No "
2644 "WQE: NPORT x%x oxid x%x ste %d\n",
2645 ctxp->sid, ctxp->oxid, ctxp->state);
2648 ctxp->wqeq = nvmewqe;
2649 xc = 0; /* create new XRI */
2650 nvmewqe->sli4_lxritag = NO_XRI;
2651 nvmewqe->sli4_xritag = NO_XRI;
2655 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2656 (ctxp->entry_cnt == 1)) ||
2657 (ctxp->state == LPFC_NVMET_STE_DATA)) {
2658 wqe = &nvmewqe->wqe;
2660 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2661 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2662 ctxp->state, ctxp->entry_cnt);
2666 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2668 case NVMET_FCOP_READDATA:
2669 case NVMET_FCOP_READDATA_RSP:
2670 /* From the tsend template, initialize words 7 - 11 */
2671 memcpy(&wqe->words[7],
2672 &lpfc_tsend_cmd_template.words[7],
2673 sizeof(uint32_t) * 5);
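/*
 * Only the per-IO words are filled in below; the static words come from
 * the command templates initialized at setup time (see
 * lpfc_nvmet_cmd_template()).
 */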
2675 /* Words 0 - 2 : The first sg segment */
2677 physaddr = sg_dma_address(sgel);
2678 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2679 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2680 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2681 wqe->fcp_tsend.bde.addrHigh =
2682 cpu_to_le32(putPaddrHigh(physaddr));
2685 wqe->fcp_tsend.payload_offset_len = 0;
2688 wqe->fcp_tsend.relative_offset = ctxp->offset;
2691 wqe->fcp_tsend.reserved = 0;
2694 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2695 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2696 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2697 nvmewqe->sli4_xritag);
2699 /* Word 7 - set ar later */
2702 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2705 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2706 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2708 /* Word 10 - set wqes later, in template xc=1 */
2710 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2712 /* Word 11 - set sup, irsp, irsplen later */
2716 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2718 /* Setup 2 SKIP SGEs */
2722 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2723 sgl->word2 = cpu_to_le32(sgl->word2);
2729 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2730 sgl->word2 = cpu_to_le32(sgl->word2);
2733 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2734 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2736 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2738 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2739 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2741 &wqe->fcp_tsend.wqe_com, 1);
2743 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2744 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2745 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2746 ((rsp->rsplen >> 2) - 1));
2747 memcpy(&wqe->words[16], rsp->rspaddr,
2751 atomic_inc(&tgtp->xmt_fcp_read);
2753 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2754 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2758 case NVMET_FCOP_WRITEDATA:
2759 /* From the treceive template, initialize words 3 - 11 */
2760 memcpy(&wqe->words[3],
2761 &lpfc_treceive_cmd_template.words[3],
2762 sizeof(uint32_t) * 9);
2764 /* Words 0 - 2 : The first sg segment */
2765 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2766 GFP_KERNEL, &physaddr);
2768 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2769 "6041 Bad txrdy buffer: oxid x%x\n",
2773 ctxp->txrdy = txrdy;
2774 ctxp->txrdy_phys = physaddr;
2775 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2776 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2777 wqe->fcp_treceive.bde.addrLow =
2778 cpu_to_le32(putPaddrLow(physaddr));
2779 wqe->fcp_treceive.bde.addrHigh =
2780 cpu_to_le32(putPaddrHigh(physaddr));
2783 wqe->fcp_treceive.relative_offset = ctxp->offset;
2786 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2787 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2788 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2789 nvmewqe->sli4_xritag);
2794 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2797 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2798 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2800 /* Word 10 - in template xc=1 */
2802 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2804 /* Word 11 - set pbde later */
2805 if (phba->cfg_enable_pbde) {
2808 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2813 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2815 /* Setup 1 TXRDY and 1 SKIP SGE */
2817 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2820 sgl->addr_hi = putPaddrHigh(physaddr);
2821 sgl->addr_lo = putPaddrLow(physaddr);
2823 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2824 sgl->word2 = cpu_to_le32(sgl->word2);
2825 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2830 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2831 sgl->word2 = cpu_to_le32(sgl->word2);
2834 atomic_inc(&tgtp->xmt_fcp_write);
2837 case NVMET_FCOP_RSP:
2838 /* From the treceive template, initialize words 4 - 11 */
2839 memcpy(&wqe->words[4],
2840 &lpfc_trsp_cmd_template.words[4],
2841 sizeof(uint32_t) * 8);
2844 physaddr = rsp->rspdma;
2845 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2846 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2847 wqe->fcp_trsp.bde.addrLow =
2848 cpu_to_le32(putPaddrLow(physaddr));
2849 wqe->fcp_trsp.bde.addrHigh =
2850 cpu_to_le32(putPaddrHigh(physaddr));
2853 wqe->fcp_trsp.response_len = rsp->rsplen;
2856 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2857 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2858 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2859 nvmewqe->sli4_xritag);
2864 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2867 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2868 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2872 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2875 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2876 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2877 /* Bad response - embed it */
2878 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2879 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2880 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2881 ((rsp->rsplen >> 2) - 1));
2882 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2887 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2889 /* Use rspbuf, NOT sg list */
2892 atomic_inc(&tgtp->xmt_fcp_rsp);
2896 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2897 "6064 Unknown Rsp Op %d\n",
2903 nvmewqe->vport = phba->pport;
2904 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2905 nvmewqe->context1 = ndlp;
2907 for (i = 0; i < rsp->sg_cnt; i++) {
2909 physaddr = sg_dma_address(sgel);
2910 cnt = sg_dma_len(sgel);
2911 sgl->addr_hi = putPaddrHigh(physaddr);
2912 sgl->addr_lo = putPaddrLow(physaddr);
2914 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2915 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2916 if ((i+1) == rsp->sg_cnt)
2917 bf_set(lpfc_sli4_sge_last, sgl, 1);
2918 sgl->word2 = cpu_to_le32(sgl->word2);
2919 sgl->sge_len = cpu_to_le32(cnt);
2921 bde = (struct ulp_bde64 *)&wqe->words[13];
2923 /* Words 13-15 (PBDE) */
2924 bde->addrLow = sgl->addr_lo;
2925 bde->addrHigh = sgl->addr_hi;
2926 bde->tus.f.bdeSize =
2927 le32_to_cpu(sgl->sge_len);
2928 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2929 bde->tus.w = cpu_to_le32(bde->tus.w);
2931 memset(bde, 0, sizeof(struct ulp_bde64));
2935 ctxp->offset += cnt;
2937 ctxp->state = LPFC_NVMET_STE_DATA;
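
/*
 * Note on the PBDE handling above: when cfg_enable_pbde is set, words
 * 13-15 of the data WQE carry an inline copy of the first data SGE as a
 * 64-bit BDE, which is intended to let the adapter start on the first
 * segment without a separate SGL fetch; otherwise those words are
 * cleared.
 */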
2943 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2944 * @phba: Pointer to HBA context object.
2945 * @cmdwqe: Pointer to driver command WQE object.
2946 * @wcqe: Pointer to driver response CQE object.
2948 * The function is called from the SLI ring event handler with no
2949 * lock held. It is the completion handler for an NVME ABTS of an FCP
2950 * command and frees the memory resources used for the NVME command.
2953 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2954 struct lpfc_wcqe_complete *wcqe)
2956 struct lpfc_nvmet_rcv_ctx *ctxp;
2957 struct lpfc_nvmet_tgtport *tgtp;
2959 unsigned long flags;
2960 bool released = false;
2962 ctxp = cmdwqe->context2;
2963 result = wcqe->parameter;
2965 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2966 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2967 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2969 spin_lock_irqsave(&ctxp->ctxlock, flags);
2970 ctxp->state = LPFC_NVMET_STE_DONE;
2972 /* Check if we already received a free context call
2973 * and we have completed processing an abort situation.
2975 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2976 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2977 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2978 list_del_init(&ctxp->list);
2979 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2982 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2983 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2984 atomic_inc(&tgtp->xmt_abort_rsp);
2986 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2987 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
2988 "WCQE: %08x %08x %08x %08x\n",
2989 ctxp->oxid, ctxp->flag, released,
2990 wcqe->word0, wcqe->total_data_placed,
2991 result, wcqe->word3);
2993 cmdwqe->context2 = NULL;
2994 cmdwqe->context3 = NULL;
2996 * If the transport has released the ctx, it can be reused now.
2997 * Otherwise it will be recycled by the transport release call.
3000 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3002 /* This is the iocbq for the abort, not the command */
3003 lpfc_sli_release_iocbq(phba, cmdwqe);
3005 /* Since iaab/iaar are NOT set, there is no work left.
3006 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3007 * should have been called already.
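 *
 * Context reuse rule used by the abort completion handlers here: the
 * ctxbuf is reposted only when the transport has already asked for the
 * context to be released (LPFC_NVMET_CTX_RLS) and the exchange is no
 * longer busy in hardware (LPFC_NVMET_XBUSY); otherwise the release is
 * left to the later transport release / XRI-aborted callbacks.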
3012 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3013 * @phba: Pointer to HBA context object.
3014 * @cmdwqe: Pointer to driver command WQE object.
3015 * @wcqe: Pointer to driver response CQE object.
3017 * The function is called from the SLI ring event handler with no
3018 * lock held. It is the completion handler for an NVME ABTS of an FCP
3019 * command and frees the memory resources used for the NVME command.
3022 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3023 struct lpfc_wcqe_complete *wcqe)
3025 struct lpfc_nvmet_rcv_ctx *ctxp;
3026 struct lpfc_nvmet_tgtport *tgtp;
3027 unsigned long flags;
3029 bool released = false;
3031 ctxp = cmdwqe->context2;
3032 result = wcqe->parameter;
3035 /* if context is clear, the related IO has already completed */
3036 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3037 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3038 wcqe->word0, wcqe->total_data_placed,
3039 result, wcqe->word3);
3043 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3044 spin_lock_irqsave(&ctxp->ctxlock, flags);
3045 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3046 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3049 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
3050 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3051 "6112 ABTS Wrong state:%d oxid x%x\n",
3052 ctxp->state, ctxp->oxid);
3055 /* Check if we already received a free context call
3056 * and we have completed processing an abort situation.
3058 ctxp->state = LPFC_NVMET_STE_DONE;
3059 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
3060 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
3061 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3062 list_del_init(&ctxp->list);
3063 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3066 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3067 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3068 atomic_inc(&tgtp->xmt_abort_rsp);
3070 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3071 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3072 "WCQE: %08x %08x %08x %08x\n",
3073 ctxp->oxid, ctxp->flag, released,
3074 wcqe->word0, wcqe->total_data_placed,
3075 result, wcqe->word3);
3077 cmdwqe->context2 = NULL;
3078 cmdwqe->context3 = NULL;
3080 * If the transport has released the ctx, it can be reused now.
3081 * Otherwise it will be recycled by the transport release call.
3084 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3086 /* Since iaab/iaar are NOT set, there is no work left.
3087 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3088 * should have been called already.
3093 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3094 * @phba: Pointer to HBA context object.
3095 * @cmdwqe: Pointer to driver command WQE object.
3096 * @wcqe: Pointer to driver response CQE object.
3098 * The function is called from the SLI ring event handler with no
3099 * lock held. It is the completion handler for an NVME ABTS of an LS
3100 * command and frees the memory resources used for the NVME command.
3103 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3104 struct lpfc_wcqe_complete *wcqe)
3106 struct lpfc_nvmet_rcv_ctx *ctxp;
3107 struct lpfc_nvmet_tgtport *tgtp;
3110 ctxp = cmdwqe->context2;
3111 result = wcqe->parameter;
3113 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3114 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3116 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3117 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
3118 ctxp, wcqe->word0, wcqe->total_data_placed,
3119 result, wcqe->word3);
3122 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3123 "6415 NVMET LS Abort No ctx: WCQE: "
3124 "%08x %08x %08x %08x\n",
3125 wcqe->word0, wcqe->total_data_placed,
3126 result, wcqe->word3);
3128 lpfc_sli_release_iocbq(phba, cmdwqe);
3132 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
3133 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3134 "6416 NVMET LS abort cmpl state mismatch: "
3135 "oxid x%x: %d %d\n",
3136 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3139 cmdwqe->context2 = NULL;
3140 cmdwqe->context3 = NULL;
3141 lpfc_sli_release_iocbq(phba, cmdwqe);
3146 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3147 struct lpfc_nvmet_rcv_ctx *ctxp,
3148 uint32_t sid, uint16_t xri)
3150 struct lpfc_nvmet_tgtport *tgtp;
3151 struct lpfc_iocbq *abts_wqeq;
3152 union lpfc_wqe128 *wqe_abts;
3153 struct lpfc_nodelist *ndlp;
3155 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3156 "6067 ABTS: sid %x xri x%x/x%x\n",
3157 sid, xri, ctxp->wqeq->sli4_xritag);
3159 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3161 ndlp = lpfc_findnode_did(phba->pport, sid);
3162 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3163 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3164 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3165 atomic_inc(&tgtp->xmt_abort_rsp_error);
3166 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3167 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3168 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3170 /* No failure to an ABTS request. */
3174 abts_wqeq = ctxp->wqeq;
3175 wqe_abts = &abts_wqeq->wqe;
3178 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3179 * that were initialized in lpfc_sli4_nvmet_alloc.
3181 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3184 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3185 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3186 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3187 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3188 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3191 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3192 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3193 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3194 abts_wqeq->sli4_xritag);
3197 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3198 CMD_XMIT_SEQUENCE64_WQE);
3199 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3200 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3201 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3204 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3207 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3208 /* Needs to be set by caller */
3209 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3212 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3213 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3214 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3215 LPFC_WQE_LENLOC_WORD12);
3216 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3217 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3220 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3221 LPFC_WQE_CQ_ID_DEFAULT);
3222 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3225 abts_wqeq->vport = phba->pport;
3226 abts_wqeq->context1 = ndlp;
3227 abts_wqeq->context2 = ctxp;
3228 abts_wqeq->context3 = NULL;
3229 abts_wqeq->rsvd2 = 0;
3230 /* hba_wqidx should already be set up from the command we are aborting */
3231 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3232 abts_wqeq->iocb.ulpLe = 1;
3234 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3235 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3236 xri, abts_wqeq->iotag);
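
/*
 * Note: the unsolicited abort built above is not an ABORT_XRI WQE; it
 * transmits the BLS ABTS frame itself via an XMIT_SEQUENCE64 WQE
 * (R_CTL FC_RCTL_BA_ABTS, TYPE FC_TYPE_BLS) addressed to the received
 * OX_ID, presumably because there is no driver-owned exchange to abort
 * for a command that was never accepted.
 */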
3241 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3242 struct lpfc_nvmet_rcv_ctx *ctxp,
3243 uint32_t sid, uint16_t xri)
3245 struct lpfc_nvmet_tgtport *tgtp;
3246 struct lpfc_iocbq *abts_wqeq;
3247 union lpfc_wqe128 *abts_wqe;
3248 struct lpfc_nodelist *ndlp;
3249 unsigned long flags;
3252 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3254 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3255 ctxp->wqeq->hba_wqidx = 0;
3258 ndlp = lpfc_findnode_did(phba->pport, sid);
3259 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3260 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3261 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3262 atomic_inc(&tgtp->xmt_abort_rsp_error);
3263 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3264 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3265 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3267 /* No failure to an ABTS request. */
3268 spin_lock_irqsave(&ctxp->ctxlock, flags);
3269 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3270 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3274 /* Issue ABTS for this WQE based on iotag */
3275 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3276 spin_lock_irqsave(&ctxp->ctxlock, flags);
3277 if (!ctxp->abort_wqeq) {
3278 atomic_inc(&tgtp->xmt_abort_rsp_error);
3279 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3280 "6161 ABORT failed: No wqeqs: "
3281 "xri: x%x\n", ctxp->oxid);
3282 /* No failure to an ABTS request. */
3283 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3284 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3287 abts_wqeq = ctxp->abort_wqeq;
3288 abts_wqe = &abts_wqeq->wqe;
3289 ctxp->state = LPFC_NVMET_STE_ABORT;
3290 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3292 /* Announce entry to new IO submit field. */
3293 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3294 "6162 ABORT Request to rport DID x%06x "
3295 "for xri x%x x%x\n",
3296 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3298 /* If the hba is getting reset, this flag is set. It is
3299 * cleared when the reset is complete and rings reestablished.
3301 spin_lock_irqsave(&phba->hbalock, flags);
3302 /* driver queued commands are in the process of being flushed */
3303 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3304 spin_unlock_irqrestore(&phba->hbalock, flags);
3305 atomic_inc(&tgtp->xmt_abort_rsp_error);
3306 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3307 "6163 Driver in reset cleanup - flushing "
3308 "NVME Req now. hba_flag x%x oxid x%x\n",
3309 phba->hba_flag, ctxp->oxid);
3310 lpfc_sli_release_iocbq(phba, abts_wqeq);
3311 spin_lock_irqsave(&ctxp->ctxlock, flags);
3312 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3313 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3317 /* Outstanding abort is in progress */
3318 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3319 spin_unlock_irqrestore(&phba->hbalock, flags);
3320 atomic_inc(&tgtp->xmt_abort_rsp_error);
3321 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3322 "6164 Outstanding NVME I/O Abort Request "
3323 "still pending on oxid x%x\n",
3325 lpfc_sli_release_iocbq(phba, abts_wqeq);
3326 spin_lock_irqsave(&ctxp->ctxlock, flags);
3327 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3328 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3332 /* Ready - mark outstanding as aborted by driver. */
3333 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3335 /* WQEs are reused. Clear stale data and set key fields to
3336 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3338 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3341 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3344 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3345 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3347 /* word 8 - tell the FW to abort the IO associated with this
3348 * outstanding exchange ID.
3350 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3352 /* word 9 - this is the iotag for the abts_wqe completion. */
3353 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3357 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3358 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3361 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3362 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3363 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3365 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3366 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3367 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3368 abts_wqeq->iocb_cmpl = 0;
3369 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3370 abts_wqeq->context2 = ctxp;
3371 abts_wqeq->vport = phba->pport;
3373 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3375 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3376 spin_unlock_irqrestore(&phba->hbalock, flags);
3377 if (rc == WQE_SUCCESS) {
3378 atomic_inc(&tgtp->xmt_abort_sol);
3382 atomic_inc(&tgtp->xmt_abort_rsp_error);
3383 spin_lock_irqsave(&ctxp->ctxlock, flags);
3384 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3385 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3386 lpfc_sli_release_iocbq(phba, abts_wqeq);
3387 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3388 "6166 Failed ABORT issue_wqe with status x%x "
3395 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3396 struct lpfc_nvmet_rcv_ctx *ctxp,
3397 uint32_t sid, uint16_t xri)
3399 struct lpfc_nvmet_tgtport *tgtp;
3400 struct lpfc_iocbq *abts_wqeq;
3401 unsigned long flags;
3402 bool released = false;
3405 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3407 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3408 ctxp->wqeq->hba_wqidx = 0;
3411 if (ctxp->state == LPFC_NVMET_STE_FREE) {
3412 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3413 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3414 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3418 ctxp->state = LPFC_NVMET_STE_ABORT;
3420 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3424 spin_lock_irqsave(&phba->hbalock, flags);
3425 abts_wqeq = ctxp->wqeq;
3426 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3427 abts_wqeq->iocb_cmpl = NULL;
3428 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3430 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3432 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3433 spin_unlock_irqrestore(&phba->hbalock, flags);
3434 if (rc == WQE_SUCCESS) {
3439 spin_lock_irqsave(&ctxp->ctxlock, flags);
3440 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3441 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3442 list_del_init(&ctxp->list);
3443 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3446 ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3447 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3449 atomic_inc(&tgtp->xmt_abort_rsp_error);
3450 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3451 "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3453 ctxp->oxid, rc, released);
3455 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3460 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3461 struct lpfc_nvmet_rcv_ctx *ctxp,
3462 uint32_t sid, uint16_t xri)
3464 struct lpfc_nvmet_tgtport *tgtp;
3465 struct lpfc_iocbq *abts_wqeq;
3466 unsigned long flags;
3469 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3470 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3471 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3474 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3475 "6418 NVMET LS abort state mismatch "
3477 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3478 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3481 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3483 /* Issue ABTS for this WQE based on iotag */
3484 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3486 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3487 "6068 Abort failed: No wqeqs: "
3489 /* No failure to an ABTS request. */
3494 abts_wqeq = ctxp->wqeq;
3496 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3501 spin_lock_irqsave(&phba->hbalock, flags);
3502 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3503 abts_wqeq->iocb_cmpl = 0;
3504 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3505 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3506 spin_unlock_irqrestore(&phba->hbalock, flags);
3507 if (rc == WQE_SUCCESS) {
3508 atomic_inc(&tgtp->xmt_abort_unsol);
3512 atomic_inc(&tgtp->xmt_abort_rsp_error);
3513 abts_wqeq->context2 = NULL;
3514 abts_wqeq->context3 = NULL;
3515 lpfc_sli_release_iocbq(phba, abts_wqeq);
3517 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3518 "6056 Failed to Issue ABTS. Status x%x\n", rc);